/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM A (activation tile) matrix from memory. This iterator assumes TensorNDHWC layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/matrix_shape.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/conv/threadblock/conv3d_params.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename Element_, typename ThreadMap_ > class Conv3dFpropActivationTileAccessIteratorAnalytic { public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = layout::TensorNDHWC; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; using TensorRef = cutlass::TensorRef<Element, Layout>; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic; static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided; static int const kConvDim = 3; using ConvProblemSize = typename conv::Conv3dProblemSize; static int const kAccessesPerVector = 1; // // Simplifying assertions // static_assert(ThreadMap::Iterations::kContiguous == 1, "Require Iterations::kContiguous == 1"); // // Parameters structure // using Params = Conv3dAnalyticParams<Layout>; private: Params const &params_; ConvProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; char const *pointer_; int filter_t_; int filter_r_; int filter_s_; int filter_c_; int offset_n_[ThreadMap::Iterations::kStrided]; int offset_z_[ThreadMap::Iterations::kStrided]; int offset_p_[ThreadMap::Iterations::kStrided]; int offset_q_[ThreadMap::Iterations::kStrided]; public: CUTLASS_HOST_DEVICE Conv3dFpropActivationTileAccessIteratorAnalytic( Params const &params, ConvProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() // tile index - units are threadblock-scoped tiles ): params_(params), problem_size_(problem_size), pointer_(reinterpret_cast<char const *>(ptr)), filter_t_(0), filter_r_(0), filter_s_(0), filter_c_(0) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); filter_c_ = threadblock_offset.column() + thread_coord.contiguous(); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { int offset_nzpq = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided; offset_n_[s] = offset_nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q); int residual = offset_nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q); offset_z_[s] = residual / (problem_size_.P * problem_size_.Q); residual = residual % (problem_size_.P * problem_size_.Q); offset_p_[s] = residual / problem_size_.Q; offset_q_[s] = residual % problem_size_.Q; } set_iteration_index(0); } CUTLASS_HOST_DEVICE static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) { return Params(problem_size, layout); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void 
set_iteration_index(Index index) { iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; iteration_strided_ = index / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_HOST_DEVICE void advance() { // moves to the next tile ++filter_s_; if (filter_s_ < problem_size_.S) { return; } filter_s_ = 0; ++filter_r_; if (filter_r_ < problem_size_.R) { return; } filter_r_ = 0; ++filter_t_; if (filter_t_ < problem_size_.T) { return; } filter_t_ = 0; filter_c_ += Shape::kColumn * problem_size_.split_k_slices; } /// Returns the coordinate in the activations tensor X that is currently pointed to /// by the iterator. CUTLASS_HOST_DEVICE TensorCoord at() const { int n = offset_n_[iteration_strided_]; int z = offset_z_[iteration_strided_]; int p = offset_p_[iteration_strided_]; int q = offset_q_[iteration_strided_]; int t = filter_t_; int r = filter_r_; int s = filter_s_; if (problem_size_.mode == Mode::kConvolution) { t = (problem_size_.T - 1 - filter_t_); r = (problem_size_.R - 1 - filter_r_); s = (problem_size_.S - 1 - filter_s_); } int d = z * problem_size_.stride_d - problem_size_.pad_d + t * problem_size_.dilation_d; int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h; int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w; return TensorCoord(n, d, h, w, filter_c_); } /// Returns true if the current coordinate is within the activations tensor X CUTLASS_HOST_DEVICE bool valid() const { TensorCoord coord = at(); return coord.n() < problem_size_.N && coord.d() >= 0 && coord.d() < problem_size_.D && coord.h() >= 0 && coord.h() < problem_size_.H && coord.w() >= 0 && coord.w() < problem_size_.W && coord.c() < problem_size_.C; } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_HOST_DEVICE AccessType const *get() const { TensorCoord coord = at(); LongIndex offset = params_.layout(coord); AccessType const *ptr = reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8); return ptr; } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv3dFpropActivationTileAccessIteratorAnalytic &operator++() { ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; } iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. CUTLASS_HOST_DEVICE static Status can_implement(ConvProblemSize const &problem_size) { // check alignment constraint on iterator's contiguous dimension if (problem_size.C % AccessType::kElements) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_analytic.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data layout of the global memory fragments, data types, and internal tile sizes. Partial specializations for threadblock::Mma operations targeting depthwise related simt instructions. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/gemm/warp/mma.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/warp/mma_depthwise_simt.h" #include "cutlass/gemm/threadblock/mma_pipelined.h" #include "cutlass/gemm/threadblock/mma_singlestage.h" #include "cutlass/gemm/threadblock/mma_base.h" #include "cutlass/conv/threadblock/depthwise_mma_base.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear_direct_conv.h" #include "cutlass/arch/cache_operation.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { namespace detail { // // Convert a WarpShapeM which is the whole tile of elements into the number of elements (2D) held by // each partitions within warp. // The goal is for each thread's tile of elements to be as square as // possible for performance (4x4 will be faster than 2x8). template<int WarpShapeM, // The number of elements (1D) contained in the entire warp int WarpNumThreadsM> // The number of partitions within the warp struct SimtWarpShape { // kP * kQ * WarpNumThreadsM = WarpShapeM // If needed, enable more specializations. 
};

template <> struct SimtWarpShape<4, 4> { static constexpr int kP = 1; static constexpr int kQ = 1; };
template <> struct SimtWarpShape<4, 2> { static constexpr int kP = 2; static constexpr int kQ = 1; };
template <> struct SimtWarpShape<4, 1> { static constexpr int kP = 2; static constexpr int kQ = 2; };
template <> struct SimtWarpShape<8, 1> { static constexpr int kP = 2; static constexpr int kQ = 4; };
template <> struct SimtWarpShape<8, 2> { static constexpr int kP = 2; static constexpr int kQ = 2; };
template <> struct SimtWarpShape<8, 4> { static constexpr int kP = 1; static constexpr int kQ = 2; };
template <> struct SimtWarpShape<16, 1> { static constexpr int kP = 4; static constexpr int kQ = 4; };
template <> struct SimtWarpShape<16, 2> { static constexpr int kP = 2; static constexpr int kQ = 4; };
template <> struct SimtWarpShape<16, 4> { static constexpr int kP = 2; static constexpr int kQ = 2; };

template <int WarpNumThreadsM>
struct SimtWarpShape<25, WarpNumThreadsM> {
  static_assert(WarpNumThreadsM == 1, "WarpShapeM cannot be evenly split by threads");
  static constexpr int kP = 5;
  static constexpr int kQ = 5;
};

template <> struct SimtWarpShape<32, 1> { static constexpr int kP = 4; static constexpr int kQ = 8; };
template <> struct SimtWarpShape<32, 2> { static constexpr int kP = 4; static constexpr int kQ = 4; };
template <> struct SimtWarpShape<32, 4> { static constexpr int kP = 2; static constexpr int kQ = 4; };

} // namespace detail

template <
    /// Shape of threadblock-scoped matrix multiply operator
    typename Shape,
    /// Shape of warp-level matrix multiply operator
    typename WarpShape,
    /// Shape of one matrix product operation (concept: GemmShape)
    typename InstructionShape,
    /// Element data type of A operand
    typename ElementA,
    /// Layout of operand A
    typename LayoutA,
    /// Element data type of B operand
    typename ElementB,
    /// Layout of operand B
    typename LayoutB,
    /// Data type of accumulator
    typename ElementC,
    /// Layout of accumulator
    typename LayoutC,
    /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
    typename OperatorClass,
    /// Size of a warp-scoped per thread access
    int kLaneAccessSizeA_ = 0,
    /// Size of a warp-scoped per thread access
    int kLaneAccessSizeB_ = 0,
    /// Number of stages
    int Stages = 2,
    /// Operation performed by MMA
    typename Operator = typename platform::conditional<
        (platform::is_same<OperatorClass, cutlass::arch::OpClassTensorOp>::value) &&
            (platform::is_same<ElementA, int8_t>::value || platform::is_same<ElementA, int4b_t>::value ||
             platform::is_same<ElementA, uint8_t>::value || platform::is_same<ElementA, uint4b_t>::value),
        cutlass::arch::OpMultiplyAddSaturate,
        cutlass::arch::OpMultiplyAdd>::type,
    /// Store the accumulators in row major or column major. Row major is used
    /// when output layout is interleaved.
bool AccumulatorsInRowMajor = false, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA = cutlass::arch::CacheOperation::Global, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB = cutlass::arch::CacheOperation::Global, /// per-element transformation for elements of A ComplexTransform TransformA = ComplexTransform::kNone, /// per-element transformation for elements of B ComplexTransform TransformB = ComplexTransform::kNone, bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value) > struct DepthwiseMmaCoreWithLaneAccessSize; ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Shape of threadblock-scoped matrix multiply operator typename Shape, /// Shape of threadblock-scoped output tile typename ThreadBlockOutputShape, /// Shape of filter shape per threadblock typename FilterShape, /// Shape of warp-level matrix multiply operator typename WarpShape, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape, /// Element data type of A operand typename ElementA, /// Layout of operand A typename LayoutA, /// Element data type of B operand typename ElementB, /// Layout of operand B typename LayoutB, /// Data type of accumulator typename ElementC, /// Layout of accumulator typename LayoutC, /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp) typename OperatorClass, /// Size of a warp-scoped per thread access int kLaneAccessSizeA_ = 0, /// Size of a warp-scoped per thread access int kLaneAccessSizeB_ = 0, /// Number of stages int Stages = 2, /// Operation performed by MMA typename Operator = typename platform::conditional< (platform::is_same<OperatorClass, cutlass::arch::OpClassTensorOp>::value) && (platform::is_same<ElementA, int8_t>::value || platform::is_same<ElementA, int4b_t>::value || platform::is_same<ElementA, uint8_t>::value || platform::is_same<ElementA, uint4b_t>::value), cutlass::arch::OpMultiplyAddSaturate, cutlass::arch::OpMultiplyAdd>::type, /// Iterator algo type conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic, /// Stride ( MatrixShape<Height, Width> ) typename StrideShape = cutlass::MatrixShape<-1, -1>, /// Dilation ( MatrixShape<Height, Width> ) typename DilationShape = cutlass::MatrixShape<-1, -1>, /// Activation Shape loaded by threadblock typename ActivationShape = cutlass::conv::TensorNHWCShape<-1,-1,-1,-1>, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. 
bool AccumulatorsInRowMajor = false, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA = cutlass::arch::CacheOperation::Global, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB = cutlass::arch::CacheOperation::Global, /// per-element transformation for elements of A ComplexTransform TransformA = ComplexTransform::kNone, /// per-element transformation for elements of B ComplexTransform TransformB = ComplexTransform::kNone, bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value) > struct DepthwiseDirectConvMmaCoreWithLaneAccessSize; ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Shape of threadblock-scoped matrix multiply operator typename Shape, /// Shape of warp-level matrix multiply operator typename WarpShape, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape, /// Element data type of A operand typename ElementA, /// Layout of operand A typename LayoutA, /// Element data type of B operand typename ElementB, /// Layout of operand B typename LayoutB, /// Data type of accumulator typename ElementC, /// Layout of accumulator typename LayoutC, /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp) typename OperatorClass, /// Number of stages int Stages, /// Operation performed by MMA typename Operator, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// per-element transformation for elements of A ComplexTransform TransformA, /// per-element transformation for elements of B ComplexTransform TransformB, bool IsComplex > struct DepthwiseMmaCoreWithLaneAccessSize< Shape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, OperatorClass, -1, -1, Stages, Operator, AccumulatorsInRowMajor, CacheOpA, CacheOpB, TransformA, TransformB, IsComplex > : cutlass::gemm::threadblock::DefaultMmaCore< Shape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, OperatorClass, Stages, Operator, AccumulatorsInRowMajor, CacheOpA, CacheOpB, TransformA, TransformB, IsComplex > {}; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: column-major /// Operator: simt class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Size of a warp-scoped per thread access (a value of -1 indicates the default) int kLaneAccessSizeA_, /// Size of a warp-scoped per thread access (a value of -1 indicates the default) int kLaneAccessSizeB_, /// Operation performed by GEMM typename Operator_> struct DepthwiseMmaCoreWithLaneAccessSize<Shape_, WarpShape_, cutlass::gemm::GemmShape<1, 1, 1>, ElementA_, layout::RowMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, kLaneAccessSizeA_, 
kLaneAccessSizeB_, 2, Operator_> : public cutlass::gemm::threadblock::DefaultMmaCore<Shape_, WarpShape_, cutlass::gemm::GemmShape<1, 1, 1>, ElementA_, layout::RowMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_> { using Base = cutlass::gemm::threadblock::DefaultMmaCore<Shape_, WarpShape_, cutlass::gemm::GemmShape<1, 1, 1>, ElementA_, layout::RowMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, 2, Operator_>; using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; static int const kLaneAccessSizeA = kLaneAccessSizeA_; static int const kLaneAccessSizeB = kLaneAccessSizeB_; // Divisility requirements static_assert( kLaneAccessSizeA > 0 && kLaneAccessSizeB > 0, "Size of a warp-scoped per thread access should be larger then ZERO" ); /// Default Operator using Operator = Operator_; /// Number of warps present using WarpCount = typename Base::WarpCount; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." ); /// Number of threads per warp static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value; static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory are same as base class // // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = cutlass::gemm::threadblock::detail::simt_get_warp_threads_m<WarpShape>(); static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM; static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = kLaneAccessSizeA / sizeof_bits<ElementA>::value; static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); static int const kPaddingM = cutlass::gemm::threadblock::detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementA>::value); static int const kPaddingN = cutlass::gemm::threadblock::detail::simt_transpose_padding(kWarpSize, Shape::kK, sizeof_bits<ElementB>::value); static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN), "Padding must be divisible by Lane"); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) >; /// Policy used to define MmaPipelined using MmaPolicy = cutlass::gemm::threadblock::MmaPolicy< MmaWarpSimt, MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts WarpCount::kK >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: row-major /// Operator: simt class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of threadblock-scoped output tile (concept: TensorNHWCShape) typename ThreadBlockOutputShape_, /// Shape of filter shape per threadblock typename FilterShape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Size of a warp-scoped per thread access int kLaneAccessSizeA_, /// Number of stages int Stages_, /// Operation performed by GEMM typename Operator_> struct DepthwiseDirectConvMmaCoreWithLaneAccessSize<Shape_, ThreadBlockOutputShape_, FilterShape_, WarpShape_, cutlass::gemm::GemmShape<1, 1, 1>, ElementA_, layout::RowMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, kLaneAccessSizeA_, 128, Stages_, Operator_> { using Shape = Shape_; using FilterShape = FilterShape_; using WarpShape = WarpShape_; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; static int const kLaneAccessSizeB = 128; // Divisility requirements static_assert( kLaneAccessSizeB > 0, "Size of a warp-scoped per thread 
access should be larger then ZERO" ); /// Default Operator using Operator = Operator_; /// Number of warps present using WarpCount = cutlass::gemm::GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, 1 >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." ); /// Number of threads per warp static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; // For Gmem load static int const kElementsPerAccessA = 128 / sizeof_bits<ElementA>::value; static int const kElementsPerAccessB = 128 / sizeof_bits<ElementB>::value; // // Shared memory layouts // using SmemLayoutA = layout::RowMajor; using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, 1>, // Set kStrided = 1 because activation shape is runtime value. kThreads, kElementsPerAccessA >; /// ThreadMap of iterator A using SmemThreadMapA = IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIteratorDirectConv< MatrixShape<1, Shape::kN>, // set kRow is 1 because it is a runtime value ElementA, SmemLayoutA, 0, SmemThreadMapA, // was IteratorThreadMapA true // Dynamic iterations. >; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, FilterShape::kCount>, kThreads, kElementsPerAccessB >; /// Transpose the ThreadMap of iterator B using SmemThreadMapB = IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIteratorDirectConv< MatrixShape<FilterShape::kCount, Shape::kN>, ElementB, SmemLayoutB, 0, SmemThreadMapB, // was IteratorThreadMapB false // static iterations. >; // // Warp-level matrix multiply operator // // Groups per threads // Fp32: 2 groups // Fp16: 2 groups static const int GroupsPerThread = sizeof(ElementB) > 1 ? 
2 : 4; // Define the warp-level op static const int WarpNumThreadsN = cutlass::const_min(WarpShape::kN / GroupsPerThread, kWarpSize); static const int WarpNumThreadsM = kWarpSize / WarpNumThreadsN; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); // Get output P, Q per thread static const int TileP = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kP; static const int TileQ = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kQ; static const int LaneLayout = 1; static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value; static const int LaneN = cutlass::const_min(numElementsB, WarpShape::kN / WarpNumThreadsN); // Define the output tile computed by each thread using ThreadOutputShape = cutlass::conv::TensorNHWCShape<1, TileP, TileQ, LaneN>; // Fetch the channel with same access size static const int LaneM = LaneN; // No paddings static int const kPaddingM = 0; static int const kPaddingN = 0; static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN), "Padding must be divisible by Lane"); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseDirectConvSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> FilterShape, /// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width> ThreadOutputShape, /// Size of the output tile computed by thread - concept: conv::TensorNHWCShape<> ThreadBlockOutputShape_, /// Size of the output tile computed by threadblock - concept: conv::TensorNHWCShape<> ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) >; /// Policy used to define MmaPipelined using MmaPolicy = cutlass::conv::threadblock::DepthwiseDirectConvMmaPolicy< MmaWarpSimt, MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts IteratorThreadMapA, IteratorThreadMapB, WarpCount::kK >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: row-major /// Operator: simt class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of threadblock-scoped output tile (concept: TensorNHWCShape) typename ThreadBlockOutputShape_, /// Shape of filter shape per threadblock typename FilterShape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Size of a warp-scoped per thread access int kLaneAccessSizeA_, /// Number 
of stages int Stages_, /// Operation performed by GEMM typename Operator_, /// Stride ( MatrixShape<Height, Width> ) typename StrideShape_, /// Dilation ( MatrixShape<Height, Width> ) typename DilationShape_, /// Activation Shape loaded by threadblock typename ActivationShape_> struct DepthwiseDirectConvMmaCoreWithLaneAccessSize<Shape_, ThreadBlockOutputShape_, FilterShape_, WarpShape_, cutlass::gemm::GemmShape<1, 1, 1>, ElementA_, layout::RowMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, kLaneAccessSizeA_, 128, Stages_, Operator_, IteratorAlgorithm::kFixedStrideDilation, StrideShape_, DilationShape_, ActivationShape_> { using Shape = Shape_; using FilterShape = FilterShape_; using WarpShape = WarpShape_; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassSimt; using StrideShape = StrideShape_; using DilationShape = DilationShape_; using ThreadBlockOutputShape = ThreadBlockOutputShape_; using ActivationShape = ActivationShape_; static int const kLaneAccessSizeB = 128; // Divisility requirements static_assert( kLaneAccessSizeB > 0, "Size of a warp-scoped per thread access should be larger then ZERO" ); /// Default Operator using Operator = Operator_; /// Number of warps present using WarpCount = cutlass::gemm::GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, 1 >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." ); /// Number of threads per warp static int const kWarpSize = cutlass::gemm::warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; // For Gmem load static int const kElementsPerAccessA = 128 / sizeof_bits<ElementA>::value; static int const kElementsPerAccessB = 128 / sizeof_bits<ElementB>::value; // // Shared memory layouts // using SmemLayoutA = layout::RowMajor; using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<ActivationShape::kC, ActivationShape::kNHW>, kThreads, kElementsPerAccessA >; /// ThreadMap of iterator A using SmemThreadMapA = IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIteratorDirectConv< MatrixShape<ActivationShape::kNHW, ActivationShape::kC>, ElementA, SmemLayoutA, 0, SmemThreadMapA, // was IteratorThreadMapA false // static iterations. >; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, FilterShape::kCount>, kThreads, kElementsPerAccessB >; /// Transpose the ThreadMap of iterator B using SmemThreadMapB = IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIteratorDirectConv< MatrixShape<FilterShape::kCount, Shape::kN>, ElementB, SmemLayoutB, 0, SmemThreadMapB, // was IteratorThreadMapB false // static iterations. >; // // Warp-level matrix multiply operator // // Groups per threads // Fp32: 2 groups // Fp16: 2 groups static const int GroupsPerThread = sizeof(ElementB) > 1 ? 
2 : 4; // Define the warp-level op static const int WarpNumThreadsN = cutlass::const_min(WarpShape::kN / GroupsPerThread, kWarpSize); static const int WarpNumThreadsM = kWarpSize / WarpNumThreadsN; static const int TileP = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kP; static const int TileQ = cutlass::conv::threadblock::detail::SimtWarpShape<WarpShape::kM, WarpNumThreadsM>::kQ; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int LaneLayout = 1; static const int numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value; static const int LaneN = cutlass::const_min(numElementsB, WarpShape::kN / WarpNumThreadsN); // Define the output tile computed by each thread using ThreadOutputShape = cutlass::conv::TensorNHWCShape<1, TileP, TileQ, LaneN>; // Fetch the channel with same access size static const int LaneM = LaneN; // No paddings static int const kPaddingM = 0; static int const kPaddingN = 0; static_assert(!(kPaddingM % LaneM) && !(kPaddingN % LaneN), "Padding must be divisible by Lane"); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::conv::warp::MmaDepthwiseDirectConvSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> FilterShape, /// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width> ThreadOutputShape, /// Size of the output tile computed by thread - concept: conv::TensorNHWCShape<> ThreadBlockOutputShape, /// Size of the output tile computed by threadblock - concept: conv::TensorNHWCShape<> ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy, /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy) IteratorAlgorithm::kFixedStrideDilation, /// Iterator algo type StrideShape, /// Stride ( MatrixShape<Height, Width> ) DilationShape, /// Dilation ( MatrixShape<Height, Width> ) ActivationShape /// Activation Shape loaded by threadblock >; /// Policy used to define MmaPipelined using MmaPolicy = cutlass::conv::threadblock::DepthwiseDirectConvMmaPolicy< MmaWarpSimt, MatrixShape<kPaddingM, 0>, // skew for A matrix to avoid SMEM bank conflicts MatrixShape<0, kPaddingN>, // skew for B matrix to avoid SMEM bank conflicts IteratorThreadMapA, IteratorThreadMapB, WarpCount::kK >; }; } // namespace threadblock } // namespace conv } // namespace cutlass
include/cutlass/conv/threadblock/depthwise_mma_core_with_lane_access_size.h
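detail::SimtWarpShape encodes a single contract: each of the WarpNumThreadsM partitions of a warp owns a kP x kQ patch of output, those patches must tile WarpShapeM exactly, and the patch is kept as close to square as possible. The compile-time sketch below restates a few of the specializations as plain integers to check that contract; the helper function is illustrative and not part of the header.

// Compile-time check of the SimtWarpShape contract: kP * kQ * WarpNumThreadsM == WarpShapeM.
// The values are copied from the specializations above; the helper is local to this sketch.
constexpr bool warp_shape_ok(int warp_shape_m, int warp_num_threads_m, int kP, int kQ) {
  return kP * kQ * warp_num_threads_m == warp_shape_m;
}

static_assert(warp_shape_ok( 4, 4, 1, 1), "SimtWarpShape<4, 4>");
static_assert(warp_shape_ok( 8, 1, 2, 4), "SimtWarpShape<8, 1>");
static_assert(warp_shape_ok(16, 2, 2, 4), "SimtWarpShape<16, 2>");
static_assert(warp_shape_ok(25, 1, 5, 5), "SimtWarpShape<25, 1>");
static_assert(warp_shape_ok(32, 4, 2, 4), "SimtWarpShape<32, 4>");

int main() {}   // the checks above are purely compile-time

The N extent of each thread tile is then limited by the lane access size: numElementsB = kLaneAccessSizeB / sizeof_bits<ElementB>::value, so the 128-bit accesses used by the direct-conv specializations give 8 elements for half_t and LaneN = min(8, WarpShape::kN / WarpNumThreadsN).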
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing pipelined epilogues with bias add and elementwise activation functions. This collective is now DEPRECATED, will be removed in the next release. Use EVT instead. 
*/ #pragma once #include "sm90_epilogue_tma_warpspecialized.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace collective { ///////////////////////////////////////////////////////////////////////////////////////////////// template < int StagesC_, int StagesD_, int FragmentSize_, class BlockTileShape_, // (BLK_M,BLK_N,BLK_K) class EpilogueTileShape_, // (EPI_TILE_M,EPI_TILE_N) class ElementC_, class StrideC_, class ElementD_, class StrideD_, class FusionCallbacks_, class CopyOpG2S_, class SmemLayoutAtomC_, class CopyOpS2R_, class CopyOpS2G_, class SmemLayoutAtomD_, class CopyOpR2S_, class CopyAtomC_ > class Sm90EpilogueTmaWarpSpecializedBiasElementwise : public CollectiveEpilogue< Sm90TmaWarpSpecialized<StagesC_, StagesD_, FragmentSize_, false, false>, BlockTileShape_, EpilogueTileShape_, ElementC_, StrideC_, ElementD_, StrideD_, FusionCallbacks_, CopyOpG2S_, SmemLayoutAtomC_, CopyOpS2R_, CopyOpS2G_, SmemLayoutAtomD_, CopyOpR2S_, CopyAtomC_ > { private: using Impl = CollectiveEpilogue< Sm90TmaWarpSpecialized<StagesC_, StagesD_, FragmentSize_, false, false>, BlockTileShape_, EpilogueTileShape_, ElementC_, StrideC_, ElementD_, StrideD_, FusionCallbacks_, CopyOpG2S_, SmemLayoutAtomC_, CopyOpS2R_, CopyOpS2G_, SmemLayoutAtomD_, CopyOpR2S_, CopyAtomC_ >; public: using DispatchPolicy = Sm90TmaWarpSpecializedBiasElementwise<StagesC_, StagesD_, FragmentSize_>; using ElementCompute = typename Impl::ThreadEpilogueOp::ElementCompute; using ElementBias = typename Impl::ThreadEpilogueOp::ElementBias; using ElementT = typename Impl::ThreadEpilogueOp::ElementAux; // Constructor inheritance using Impl::Impl; // Host side epilogue arguments struct [[deprecated("use Sm90TmaWarpSpecialized Arguments instead")]] Arguments { struct ThreadArgs { ElementCompute alpha{1}; ElementCompute beta{0}; ElementCompute const *alpha_ptr{nullptr}; ElementCompute const *beta_ptr{nullptr}; } thread; ElementC_ const* ptr_C{nullptr}; StrideC_ dC{}; ElementD_* ptr_D{nullptr}; StrideD_ dD{}; ElementBias const* ptr_Bias{nullptr}; ElementT* ptr_T{nullptr}; CUTLASS_HOST_DEVICE operator typename Impl::Arguments() const { typename Impl::Arguments arguments; arguments.thread.alpha = thread.alpha; arguments.thread.beta = thread.beta; arguments.thread.alpha_ptr = thread.alpha_ptr; arguments.thread.beta_ptr = thread.beta_ptr; if constexpr (not cute::is_void_v<ElementBias>) { arguments.thread.bias_ptr = ptr_Bias; } if constexpr (not cute::is_void_v<ElementT>) { arguments.thread.aux_ptr = ptr_T; arguments.thread.dAux = dD; } arguments.ptr_C = ptr_C; arguments.dC = dC; arguments.ptr_D = ptr_D; arguments.dD = dD; return arguments; } }; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace collective } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/epilogue/collective/sm90_epilogue_tma_warpspecialized_bias_elementwise.hpp
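The deprecated collective keeps existing call sites compiling by giving its legacy Arguments struct an implicit conversion operator to the EVT-style Impl::Arguments, remapping alpha/beta, the bias pointer, and the auxiliary output pointer field by field. Below is a minimal sketch of that pattern; NewArguments, LegacyArguments, and their members are invented stand-ins, not the CUTLASS definitions.

// Sketch of the compatibility pattern used above: a deprecated argument struct converts
// itself into the replacement struct so existing call sites keep compiling unchanged.
// All types below are illustrative stand-ins, not CUTLASS definitions.
#include <cstdio>

struct NewArguments {                   // stand-in for Impl::Arguments
  struct Thread {
    float alpha = 1.f;
    float beta = 0.f;
    float const* bias_ptr = nullptr;
  } thread;
  void const* ptr_C = nullptr;
  void* ptr_D = nullptr;
};

struct [[deprecated("use NewArguments instead")]] LegacyArguments {
  struct Thread {
    float alpha = 1.f;
    float beta = 0.f;
  } thread;
  void const* ptr_C = nullptr;
  void* ptr_D = nullptr;
  float const* ptr_Bias = nullptr;

  // Implicit conversion: field-by-field remap into the new layout.
  operator NewArguments() const {
    NewArguments args;
    args.thread.alpha    = thread.alpha;
    args.thread.beta     = thread.beta;
    args.thread.bias_ptr = ptr_Bias;    // bias moves under the fused thread arguments
    args.ptr_C = ptr_C;
    args.ptr_D = ptr_D;
    return args;
  }
};

int main() {
  LegacyArguments legacy;               // emits a deprecation warning, still compiles
  legacy.thread.alpha = 2.f;
  NewArguments converted = legacy;      // uses the conversion operator
  std::printf("alpha=%g\n", converted.thread.alpha);
}

Marking the old struct [[deprecated]] lets downstream code migrate on its own schedule while the conversion operator preserves behavior in the meantime.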
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing reduction operations used by epilogues. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a reduction sum to an array of elements. /// /// template < typename Element_, ///< Data type used to load and store tensors int Count ///< Number of elements computed per operation > class ReductionOpPlus { public: using Element = Element_; static int const kCount = Count; using Fragment = Array<Element, kCount>; using Operator = plus<Fragment>; /// Host-constructable parameters structure struct Params { }; private: /// reduction operator Operator operator_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE ReductionOpPlus(Params const &params) { } /// Computes Compute => CUTLASS_HOST_DEVICE Fragment operator()( Fragment const &lhs, Fragment const &rhs) const { return operator_(lhs, rhs); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass
include/cutlass/epilogue/thread/reduction_op.h
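ReductionOpPlus is deliberately thin: an empty Params struct and an operator() that applies the plus functor to a whole fragment at once. The host-only analogue below uses std::array in place of cutlass::Array to show the same shape; the Sketch suffix marks it as illustrative rather than the CUTLASS type.

// Host-only analogue of ReductionOpPlus: an element-wise "plus" reduction over a
// fixed-size fragment. std::array stands in for cutlass::Array in this sketch.
#include <array>
#include <cstdio>

template <typename Element, int Count>
class ReductionOpPlusSketch {
public:
  using Fragment = std::array<Element, Count>;

  struct Params {};                     // mirrors the empty host-constructable Params

  explicit ReductionOpPlusSketch(Params const&) {}

  // Element-wise sum of two fragments, matching operator()(lhs, rhs) above.
  Fragment operator()(Fragment const& lhs, Fragment const& rhs) const {
    Fragment result{};
    for (int i = 0; i < Count; ++i) {
      result[i] = lhs[i] + rhs[i];
    }
    return result;
  }
};

int main() {
  ReductionOpPlusSketch<float, 4> reduce({});
  std::array<float, 4> a{1, 2, 3, 4}, b{10, 20, 30, 40};
  auto c = reduce(a, b);
  std::printf("%g %g %g %g\n", c[0], c[1], c[2], c[3]);   // 11 22 33 44
}

Functors of this form are the kind of operator a split-K reduction uses to fold partial accumulator fragments from different K slices into a single result.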
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #include "predicated_tile_iterator.h" #include "cutlass/gemm/gemm.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for TensorOp accumulator layouts template < typename ThreadblockShape, typename WarpShape, int PartitionsK, typename ElementOutput, int ElementsPerAccess, typename ElementAccumulator > struct DefaultThreadMapVoltaTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for TensorOp accumulator layouts template < typename ThreadblockShape_, typename WarpShape_, int PartitionsK, typename ElementOutput_, int ElementsPerAccess > struct DefaultThreadMapVoltaTensorOp< ThreadblockShape_, WarpShape_, PartitionsK, ElementOutput_, ElementsPerAccess, half_t> { using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; static int const kPartitionsK = PartitionsK; using ElementOutput = ElementOutput_; static int const kElementsPerAccess = ElementsPerAccess; using ElementAccumulator = half_t; // // Definitions // struct Detail { static int const kTensorOpRows = 16; static int const kWarpSize = 32; static int const kInterleavedTilesM = WarpShape::kM / 32; static_assert( !(ThreadblockShape::kM % WarpShape::kM) && !(ThreadblockShape::kN % WarpShape::kN), "Divisibility"); /// Number of warps using WarpCount = gemm::GemmShape< ThreadblockShape::kM / WarpShape::kM, ThreadblockShape::kN / WarpShape::kN, kPartitionsK >; /// Number of participating threads static int const kThreads = WarpCount::kCount * kWarpSize; using Shape = cutlass::epilogue::threadblock::OutputTileShape< ThreadblockShape::kN, // column 4, // row 4, // group WarpCount::kM, // cluster 1 // tile >; /// Number of iterations per subspace using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 2, // row kInterleavedTilesM, // group 1, // cluster WarpShape::kM / kTensorOpRows // iterations >; }; // // ThreadMap // /// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap using Type = OutputTileOptimalThreadMap < typename Detail::Shape, typename Detail::Count, Detail::kThreads, kElementsPerAccess, sizeof_bits<ElementOutput>::value >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for TensorOp accumulator layouts template < typename ThreadblockShape_, typename WarpShape_, int PartitionsK, typename ElementOutput_, int ElementsPerAccess > struct DefaultThreadMapVoltaTensorOp< ThreadblockShape_, WarpShape_, PartitionsK, ElementOutput_, ElementsPerAccess, float> { using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; static int const kPartitionsK = PartitionsK; using ElementOutput = ElementOutput_; static int const kElementsPerAccess = ElementsPerAccess; using ElementAccumulator = float; // // Definitions // struct Detail { static int const kTensorOpRows = 16; static int const kWarpSize = 32; static int const kInterleavedTilesM = WarpShape::kM / 32; static_assert( !(ThreadblockShape::kM % WarpShape::kM) && !(ThreadblockShape::kN % WarpShape::kN), "Divisibility"); /// Number of warps using WarpCount = gemm::GemmShape< ThreadblockShape::kM / WarpShape::kM, ThreadblockShape::kN / WarpShape::kN, 
kPartitionsK >; /// Number of participating threads static int const kThreads = WarpCount::kCount * kWarpSize; using Shape = cutlass::epilogue::threadblock::OutputTileShape< ThreadblockShape::kN, // column 4, // row 4, // group WarpCount::kM, // cluster 1 // tile >; /// Number of iterations per subspace using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 2, // row kInterleavedTilesM, // group 1, // cluster WarpShape::kM / kTensorOpRows // iterations >; }; // // ThreadMap // /// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap using Type = OutputTileOptimalThreadMap < typename Detail::Shape, typename Detail::Count, Detail::kThreads, kElementsPerAccess, sizeof_bits<ElementOutput>::value >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h
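The Detail arithmetic is easiest to read with concrete numbers. The constants below assume a hypothetical configuration of a 128x128 threadblock tile, 64x64 warp tiles, and PartitionsK = 1; the shapes are chosen only for illustration, not a statement about which instantiations exist.

// Worked instance of the Detail arithmetic in DefaultThreadMapVoltaTensorOp for one
// hypothetical configuration: ThreadblockShape 128x128, WarpShape 64x64, PartitionsK = 1.
constexpr int kThreadblockM = 128, kThreadblockN = 128;
constexpr int kWarpM = 64, kWarpN = 64;
constexpr int kPartitionsK = 1;

constexpr int kTensorOpRows = 16;                       // accumulator rows covered per iteration
constexpr int kWarpSize = 32;

constexpr int kWarpCountM = kThreadblockM / kWarpM;     // 2
constexpr int kWarpCountN = kThreadblockN / kWarpN;     // 2
constexpr int kThreads = kWarpCountM * kWarpCountN * kPartitionsK * kWarpSize;   // 128 threads

constexpr int kInterleavedTilesM = kWarpM / 32;         // 2 interleaved tiles per warp row
constexpr int kIterations = kWarpM / kTensorOpRows;     // 4 iterations to drain one warp's M extent

static_assert(kThreads == 128, "four warps of 32 threads cover the 128x128 tile");
static_assert(kInterleavedTilesM == 2 && kIterations == 4, "per-warp epilogue counts");

int main() {}   // compile-time sketch only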
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Generic epilogue for implementing certain kinds of fused epilogue behavior. 
*/ #pragma once ///////////////////////////////////////////////////////////////////////////////////////////////// #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/matrix_coord.h" #include "cutlass/semaphore.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" //////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////////////////////////// class EpilogueFusedVisitorConcept { public: static int const kIterations = 1; static int const kElementsPerAccess = 4; using ElementOutput = float; using ElementAccumulator = float; using AccumulatorFragment = Array<ElementAccumulator, kElementsPerAccess>; /// Arguments structure struct Arguments { }; /// Params structure struct Params { Params() { } Params(Arguments const &args) { } }; /// Shared storage struct SharedStorage { }; public: CUTLASS_DEVICE EpilogueFusedVisitorConcept( Params const &params, ///< Parameters routed to the epilogue SharedStorage &shared_storage, ///< Shared storage needed by the functors here MatrixCoord const &problem_size, ///< Problem size of the output int thread_idx, ///< Thread index within the threadblock int warp_idx, ///< Warp index within the threadblock int lane_idx, ///< Lane index within the warp MatrixCoord const &threadblock_offset = MatrixCoord(0, 0)) { ///< Coordinate } /// Helper to indicate split-K behavior CUTLASS_DEVICE void set_k_partition( int split_k_index, ///< Index of this threadblock within split-K partitioned scheme int split_k_slices) { ///< Total number of split-K slices } /// Called to set the batch index CUTLASS_DEVICE void set_batch_index(int batch_idx) { } /// Called at the start of the epilogue just before iterating over accumulator slices CUTLASS_DEVICE void begin_epilogue() { } /// Called at the start of one step before starting accumulator exchange CUTLASS_DEVICE void begin_step(int step_idx) { } /// Called at the start of a row CUTLASS_DEVICE void begin_row(int row_idx) { } /// Called after accumulators have been exchanged for each accumulator vector CUTLASS_DEVICE void visit( int iter_idx, int row_idx, int column_idx, int frag_idx, AccumulatorFragment const &accum) { } /// Called at the end of a row CUTLASS_DEVICE void end_row(int row_idx) { } /// Called after all accumulator elements have been visited CUTLASS_DEVICE void end_step(int step_idx) { } /// Called after all steps have been completed CUTLASS_DEVICE void end_epilogue() { } }; //////////////////////////////////////////////////////////////////////////////////////////////////// /// Epilogue operator template < typename Visitor_, ///< Functor containing fused operations (satisfies EpilogueFusedVisitorConcept) typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) int FragmentsPerPartition = 1, ///< Used to coarsten the epilogue granularity int IterationsUnroll = ///< Used to reduce binary size 
when epilogue op is large (true || !IsEpilogueFunctorHeavy<Visitor_>::value) > class EpilogueWithVisitor : public EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition> { public: using Visitor = Visitor_; using Base = EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition>; using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using Padding = Padding_; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = typename Base::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Output access size static int const kElementsPerAccess = Visitor::kElementsPerAccess; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Array type used by output functor using AccumulatorAccessType = Array< typename WarpTileIterator::Element, kElementsPerAccess>; /// Number of warps using WarpCount = typename Base::WarpCount; static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK; static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles; using SharedStorage = typename Base::SharedStorage; private: /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator_; public: /// Constructor CUTLASS_DEVICE EpilogueWithVisitor( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): Base(shared_storage, thread_idx, warp_idx, lane_idx), shared_load_iterator_(shared_storage.reference(), thread_idx) { } /// Streams the result to global memory CUTLASS_DEVICE void operator()( Visitor & visitor, AccumulatorTile const &accumulators) { ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles) visitor.begin_epilogue(); // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // #pragma unroll(IterationsUnroll ? 
Visitor::kIterations : 1) for (int iter_idx = 0; iter_idx < Visitor::kIterations; ++iter_idx) { // // Load the source // visitor.begin_step(iter_idx); // // Convert and store fragment // __syncthreads(); acc2smem_source_needed<cutlass::make_index_sequence<Visitor::kIterations>>::push( iter_idx, accum_fragment_iterator, this->warp_tile_iterator_); __syncthreads(); // // Load fragments from shared memory // typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment[0]); // If the number of k-slices is > 1 - perform a reduction amongst the k-slices if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); shared_load_iterator_.load(aligned_accum_fragment[i]); aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); } shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset); } // // Iterate over output fragments // AccumulatorAccessType const *accum_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment[0]); int const kAccumulatorFragmentCount = AccumulatorTile::kElements / (Visitor::kIterations * AccumulatorAccessType::kElements); CUTLASS_PRAGMA_UNROLL for (int idx = 0; idx < kAccumulatorFragmentCount; ++idx) { int row_idx = idx / SharedLoadIterator::ThreadMap::Iterations::kColumn; int col_idx = idx % SharedLoadIterator::ThreadMap::Iterations::kColumn; // Start a new row of the output fragment if (!col_idx) { visitor.begin_row(row_idx); } visitor.visit( iter_idx, row_idx, col_idx, idx, accum_frag_ptr[idx] ); // End the row of the output fragment if (col_idx + 1 == SharedLoadIterator::ThreadMap::Iterations::kColumn) { visitor.end_row(row_idx); } } // // Conclude the step // visitor.end_step(iter_idx); } visitor.end_epilogue(); } private: template<class Seq> struct acc2smem_source_needed; template <size_t... 
Seq> struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> { template<int Advance> CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, WarpTileIterator &warp_tile_iterator) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); warp_tile_iterator.store(accum_fragment); } CUTLASS_DEVICE static void push(size_t pos, AccumulatorFragmentIterator const &iterator_begin, WarpTileIterator &warp_tile_iterator) { int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...}; } }; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Helper to create an EpilogueWithVisitor from an existing epilogue template <typename Visitor_, typename Existing_, bool IterationsUnroll = true> struct EpilogueWithVisitorFromExistingEpilogue { using Epilogue = EpilogueWithVisitor< Visitor_, typename Existing_::Shape, typename Existing_::WarpMmaOperator, Existing_::kPartitionsK, typename Existing_::AccumulatorFragmentIterator, typename Existing_::WarpTileIterator, typename Existing_::SharedLoadIterator, typename Existing_::Padding, Existing_::kFragmentsPerIteration, IterationsUnroll >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/epilogue/threadblock/epilogue_with_visitor.h/0
{ "file_path": "include/cutlass/epilogue/threadblock/epilogue_with_visitor.h", "repo_id": "include", "token_count": 4847 }
25
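The EpilogueFusedVisitorConcept above documents the callback sequence a fused-epilogue visitor must provide. As a hedged illustration only, the sketch below shows a minimal visitor-like class that scales each accumulator fragment in visit(); the names (ScaleVisitor, alpha) are assumptions, std::array stands in for cutlass::Array, and the driver loop in main() simply imitates the order in which EpilogueWithVisitor invokes the callbacks (begin_epilogue, begin_step, begin_row, visit, end_row, end_step, end_epilogue).

// Illustrative sketch: a plain C++ "visitor" following the documented callback order.
#include <array>
#include <cstdio>

struct ScaleVisitor {
  static constexpr int kIterations = 2;
  static constexpr int kElementsPerAccess = 4;
  using AccumulatorFragment = std::array<float, kElementsPerAccess>;

  float alpha = 2.0f;  // assumed fused operation: scale by a constant

  void begin_epilogue() {}
  void begin_step(int) {}
  void begin_row(int) {}
  void visit(int, int, int, int, AccumulatorFragment const &accum) {
    // A real visitor would convert and store the fragment; here we just scale and print.
    for (float x : accum) { std::printf("%f ", alpha * x); }
    std::printf("\n");
  }
  void end_row(int) {}
  void end_step(int) {}
  void end_epilogue() {}
};

int main() {
  ScaleVisitor visitor;
  visitor.begin_epilogue();
  for (int iter = 0; iter < ScaleVisitor::kIterations; ++iter) {
    visitor.begin_step(iter);
    visitor.begin_row(0);
    visitor.visit(iter, /*row_idx=*/0, /*column_idx=*/0, /*frag_idx=*/0, {1.f, 2.f, 3.f, 4.f});
    visitor.end_row(0);
    visitor.end_step(iter);
  }
  visitor.end_epilogue();
  return 0;
}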
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/permute.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/memory.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" #include "cutlass/conv/conv2d_problem_size.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { //////////////////////////////////////////////////////////////////////////////// namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load and store output tile from global memory in epilogue. 
/// /// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: PitchLinearThreadMap) typename Element_, ///< Element data type typename ThreadOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1>, typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1> > class PredicatedTileIteratorDirectConv { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::Shape; using ThreadOutputShape = ThreadOutputShape_; using ThreadBlockOutputShape = ThreadBlockOutputShape_; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kThreads = ThreadMap::kThreads; using ConvProblemSize = typename cutlass::conv::Conv2dProblemSize; /// Fragment object using Fragment = Array<Element, ThreadMap::Iterations::kCount * kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray<Element, kElementsPerAccess>; static int const kLoadsPerAccess = AccessType::kElements / AccessType::kElements; using ThreadTileCount = MatrixShape< ThreadBlockOutputShape::kH / ThreadOutputShape::kH, ThreadBlockOutputShape::kW / ThreadOutputShape::kW >; // // Parameters struct // /// Uses a non-template class struct Params : PredicatedTileIteratorDirect2dConvParams { using Base = PredicatedTileIteratorDirect2dConvParams; CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Layout const &layout, cutlass::conv::Conv2dProblemSize const &problem_size): PredicatedTileIteratorDirect2dConvParams( layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess, problem_size, {ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW} ) { } CUTLASS_HOST_DEVICE Params(Base const &base) : Base(base) { } }; /// Mask object struct Mask { static int const kCount = ThreadMap::Iterations::kContiguous; /// Predicate state bool predicates[kCount]; // // Mask // CUTLASS_HOST_DEVICE Mask() { enable(); } ///< Efficiently disables all accesses guarded by mask CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = false; } } ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask CUTLASS_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kCount; ++i) { predicates[i] = true; } } }; private: // // Data members // /// Parameters structure containing reference and precomputed state. 
PredicatedTileIteratorDirect2dConvParams params_; /// Byte-level pointer uint8_t *byte_pointer_; /// Element *pointer_; /// Array of boolean values to contain steady-state predicates Mask mask_; /// Extent of the matrix tile in rows Index extent_row_; /// Extent of the matrix tile in rows Index extent_column_; /// A thread's starting row position (assuming steady-state predicates have been computed) Index thread_start_row_; /// A thread's starting column Index thread_start_column_; /// Initial thread output location int thread_start_n_, thread_start_p_, thread_start_q_; /// Current threadblock tile index int tile_index_; // // Static asserts about internal strides // static_assert(sizeof(extent_row_) == 4, "Expected 32b extents"); static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents"); static_assert(sizeof(PredicatedTileIteratorDirect2dConvParams::stride) == 8, "Expected 64b strides"); private: // // Methods // public: // // Methods // /// Constructor CUTLASS_DEVICE PredicatedTileIteratorDirectConv( PredicatedTileIteratorDirect2dConvParams const & params, Element *pointer, TensorCoord extent, int thread_idx, TensorCoord threadblock_offset = TensorCoord() ): params_(params), pointer_(pointer) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx); extent_row_ = extent.row(); extent_column_ = extent.column(); // stride dim (PQ) thread_start_row_ = thread_offset.column(); // contiguous dim (Channels) thread_start_column_ = threadblock_offset.column() + thread_offset.row(); tile_index_ = threadblock_offset.row(); set_tile_index(0); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void set_tile_index(const int index) { int residual; params_.pq_divmod(thread_start_n_, residual, tile_index_ + index); params_.q_divmod(thread_start_p_, thread_start_q_, residual); // Compute the base output coord of ThreadBlock thread_start_p_ *= ThreadBlockOutputShape::kH; thread_start_q_ *= ThreadBlockOutputShape::kW; // Initialize predicates CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { mask_.predicates[c] = ((thread_start_column_ + c * ThreadMap::Delta::kContiguous) < extent_column_); } // Null pointer performs no accesses if (!pointer_) { mask_.clear(); } } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment &frag, int64_t byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int frag_base_idx = s * ThreadMap::Iterations::kContiguous + c; int current_row = thread_start_row_ + s * ThreadMap::Delta::kStrided; int p = current_row / ThreadBlockOutputShape::kW; int q = current_row % ThreadBlockOutputShape::kW; int current_p = thread_start_p_ + p; int current_q = thread_start_q_ + q; bool row_guard = (current_p) < params_.P && (current_q) < params_.Q && (thread_start_n_ < params_.N) && current_row < ThreadMap::Shape::kStrided; int output_row_offset = thread_start_n_ * params_.stride_n + current_p * params_.stride_p + current_q; uint8_t *byte_pointer = reinterpret_cast<uint8_t *>(pointer_) + LongIndex(output_row_offset) * LongIndex(params_.stride) + LongIndex(thread_start_column_ + c * ThreadMap::Delta::kContiguous) * sizeof(AccessType) / kElementsPerAccess; AccessType *frag_ptr = 
reinterpret_cast<AccessType *>(&frag); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); bool guard = row_guard && mask_.predicates[c]; cutlass::arch::global_load<AccessType, sizeof(AccessType)>( frag_ptr[frag_base_idx], (void *)&memory_pointer[0], guard); } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Stores a fragment to memory CUTLASS_DEVICE void store_with_byte_offset(Fragment const &frag, int64_t byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int frag_base_idx = s * ThreadMap::Iterations::kContiguous + c; int current_row = thread_start_row_ + s * ThreadMap::Delta::kStrided; int p = current_row / ThreadBlockOutputShape::kW; int q = current_row % ThreadBlockOutputShape::kW; int current_p = thread_start_p_ + p; int current_q = thread_start_q_ + q; bool row_guard = (current_p) < params_.P && (current_q) < params_.Q && (thread_start_n_ < params_.N) && current_row < ThreadMap::Shape::kStrided; int output_row_offset = thread_start_n_ * params_.stride_n + current_p * params_.stride_p + current_q; uint8_t *byte_pointer = reinterpret_cast<uint8_t *>(pointer_) + LongIndex(output_row_offset) * LongIndex(params_.stride) + LongIndex(thread_start_column_ + c * ThreadMap::Delta::kContiguous) * sizeof(AccessType) / kElementsPerAccess; AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); AccessType *memory_pointer = reinterpret_cast<AccessType *>(byte_pointer + byte_offset); bool guard = row_guard && mask_.predicates[c]; cutlass::arch::global_store<AccessType, sizeof(AccessType)>( frag_ptr[frag_base_idx], (void *)&memory_pointer[0], guard); } } } /// Stores a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) const { store_with_byte_offset(frag, 0); } CUTLASS_DEVICE MatrixCoord thread_start() const { return MatrixCoord(thread_start_row_, thread_start_column_); } /// Need to get the thread start row from the tile iterator CUTLASS_DEVICE int32_t thread_start_row() const { return thread_start_row_; } /// Need to get the thread start row from the tile iterator CUTLASS_DEVICE int32_t thread_start_column() const { return thread_start_column_; } /// Extent of the matrix in rows CUTLASS_DEVICE Index extent_row() const { return extent_row_; } /// Extent of the matrix in columns CUTLASS_DEVICE Index extent_column() const { return extent_column_; } /// Advances to the next position to load or store CUTLASS_HOST_DEVICE PredicatedTileIteratorDirectConv &operator++() { // do nothing return *this; } ///< Efficiently disables all accesses guarded by mask CUTLASS_DEVICE void clear_mask() { mask_.clear(); } ///< Efficiently enables all accesses guarded by mask CUTLASS_DEVICE void enable_mask() { mask_.enable(); } ///< Sets the mask CUTLASS_DEVICE void get_mask(Mask &mask) const { mask = mask_; } ///< Sets the mask CUTLASS_DEVICE void set_mask(Mask const &mask) { mask_ = mask; } }; /////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
include/cutlass/epilogue/threadblock/predicated_tile_iterator_direct_conv.h/0
{ "file_path": "include/cutlass/epilogue/threadblock/predicated_tile_iterator_direct_conv.h", "repo_id": "include", "token_count": 4873 }
26
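set_tile_index() above recovers an (n, p, q) output coordinate from a linear threadblock tile index with two divmods and then scales p and q by the threadblock output shape. The host-side sketch below walks the same index math; the exact divisors live in PredicatedTileIteratorDirect2dConvParams, and here they are assumed to be the per-dimension tile counts, with illustrative problem extents.

// Host-side sketch of the (n, start_p, start_q) recovery performed in set_tile_index().
// Problem extents, threadblock output shape, and the divmod divisors are assumptions.
#include <cstdio>

int main() {
  int const N = 2, P = 8, Q = 8;        // assumed output extents
  int const kBlockH = 4, kBlockW = 4;   // assumed ThreadBlockOutputShape (H, W)

  int tiles_p = P / kBlockH;
  int tiles_q = Q / kBlockW;
  int total_tiles = N * tiles_p * tiles_q;

  for (int tile_index = 0; tile_index < total_tiles; ++tile_index) {
    // Analogue of params_.pq_divmod: split the tile index into n and a residual.
    int n        = tile_index / (tiles_p * tiles_q);
    int residual = tile_index % (tiles_p * tiles_q);
    // Analogue of params_.q_divmod: split the residual into p and q tile coordinates.
    int p = residual / tiles_q;
    int q = residual % tiles_q;
    // Scale to the base output coordinate of the threadblock, as in set_tile_index().
    int start_p = p * kBlockH;
    int start_q = q * kBlockW;
    std::printf("tile %2d -> n=%d, start_p=%d, start_q=%d\n", tile_index, n, start_p, start_q);
  }
  return 0;
}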
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #include "cutlass/array.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/epilogue/warp/tensor_op_policy.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename Element, ///< data type of element to be written typename Layout ///< target shared memory layout > class TileIteratorTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename Element_ ///< data type of element to be written > class TileIteratorTensorOp<WarpShape_, OperatorShape_, Element_, layout::RowMajor> { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using Element = Element_; using Layout = layout::RowMajor; using TensorLayout = Layout; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< Element, Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>; /// This is the complete warp-level accumulator tile. 
//using AccumulatorTile = typename Operator::FragmentC; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; /// Number of times this iterator can be incremented using TileIterations = typename Policy::TileIterations; // Internal constants struct Detail { static int const kLanesInQuad = 4; }; /// Padding quantity using Padding = MatrixShape< 0, Detail::kLanesInQuad * Policy::kElementsPerAccess>; private: /// Storage type for accessing memory using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>; // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object Layout layout_; /// Thread offset MatrixCoord thread_offset_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorTensorOp(): pointer_(nullptr) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOp( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0] / Policy::kElementsPerAccess) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); thread_offset_ = { quad_id, lane_in_quad * Policy::kElementsPerAccess }; pointer_ += layout_({thread_offset_.row(), thread_offset_.column() / Policy::kElementsPerAccess}); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorTensorOp & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset / Policy::kElementsPerAccess; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOp & add_tile_offset(TensorCoord const &tile_offset) { MatrixCoord coord_offset( tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn ); thread_offset_ += coord_offset; pointer_ += layout_({ coord_offset.row(), coord_offset.column() / Policy::kElementsPerAccess }); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOp & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { pointer_[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess] = frag_ptr[n]; } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { frag_ptr[n] = pointer_[n * Detail::kLanesInQuad + pointer_offset / Policy::kElementsPerAccess]; } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } CUTLASS_HOST_DEVICE TileIteratorTensorOp & operator++() { return add_tile_offset({1, 0}); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix 
multiply operation shape (concept: gemm::GemmShape) typename Element_, ///< data type of element to be written int InterleavedK ///< number of interleaved k > class TileIteratorTensorOp<WarpShape_, OperatorShape_, Element_, layout::ColumnMajorInterleaved<InterleavedK> > { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using Element = Element_; using Layout = layout::ColumnMajorInterleaved<InterleavedK>; using TensorLayout = Layout; ///< shared memory tensor ref layout using TensorRef = TensorRef<Element, TensorLayout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; /// Shape of the tile in memory using Shape = MatrixShape< // Policy::kRowsPerIteration, WarpShape::kM, InterleavedK >; /// This is the fragment size produced by one tile using Fragment = Array< Element, Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction * Policy::kElementsPerIteration>; /// This is the fragment size produced by one iteration // using Fragment = Array< // Element, Policy::kElementsPerIteration >; /// This is the complete warp-level accumulator tile. //using AccumulatorTile = typename Operator::FragmentC; /// Number of times this iterator can be incremented using TileIterations = typename Policy::TileIterations; // Internal constants struct Detail { static int const kLanesInQuad = 4; }; /// Padding quantity using Padding = MatrixShape< 0, Detail::kLanesInQuad * Policy::kElementsPerIteration>; private: /// Storage type for accessing memory using AccessType = AlignedArray<Element, Policy::kElementsPerAccess>; // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object TensorLayout layout_; /// Thread offset MatrixCoord thread_offset_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorTensorOp(): pointer_(nullptr) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOp( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0]) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); thread_offset_ = { quad_id, lane_in_quad * Policy::kElementsPerIteration }; pointer_ += (layout_({thread_offset_.row(), thread_offset_.column()}) / Policy::kElementsPerAccess); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorTensorOp & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset / Policy::kElementsPerAccess; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOp & add_tile_offset(TensorCoord const &tile_offset) { MatrixCoord coord_offset( tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn ); thread_offset_ += coord_offset; pointer_ += (layout_({ coord_offset.row(), coord_offset.column() }) / Policy::kElementsPerAccess); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOp & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int 
n = 0; n < Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction; n++ ) { AccessType *ptr = pointer_ + layout_({n * Policy::kRowsPerIteration, 0}) / Policy::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int a = 0; a < Policy::kAccessPerIteration; ++a) { ptr[a + pointer_offset / Policy::kElementsPerAccess] = frag_ptr[n * Policy::kAccessPerIteration + a]; // printf("store thread %d, address %p, bank %ld\n", threadIdx.x, pointer_+a+n*Detail::kLanesInQuad, // ((long long)(pointer_+a+n*Detail::kLanesInQuad)>>2)&0x1f); } } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kRow * Policy::kIterationsPerInstruction; n++ ) { AccessType *ptr = pointer_ + layout_({n * Policy::kRowsPerIteration, 0}) / Policy::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int a = 0; a < Policy::kAccessPerIteration; ++a) { frag_ptr[n * Policy::kAccessPerIteration + a] = ptr[a + pointer_offset / Policy::kElementsPerAccess]; } } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } CUTLASS_HOST_DEVICE TileIteratorTensorOp & operator++() { return add_tile_offset({0, 1}); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template for reading and writing tiles of accumulators to shared memory template < typename WarpShape_, ///< shape of warp-level GEMM (concept: GemmShape) typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape) typename Element_, ///< data type of element to be written typename Layout_ > class TileIteratorTensorOpCanonical { public: using WarpShape = WarpShape_; using OperatorShape = OperatorShape_; using Element = Element_; using Layout = Layout_; using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor using Index = typename TensorRef::Index; using LongIndex = typename TensorRef::LongIndex; using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>; static int const kAccessSize = 1; static int const kAccessCount = Policy::kElementsPerAccess / kAccessSize; /// Shape of the tile in memory using Shape = MatrixShape< Policy::kRowsPerIteration, WarpShape::kN >; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< Element, Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>; /// This is the complete warp-level accumulator tile. 
//using AccumulatorTile = typename Operator::FragmentC; /// Number of times this iterator can be incremented static int const kIterations = Policy::kIterations; // Internal constants struct Detail { static int const kLanesInQuad = 4; }; /// Padding quantity using Padding = MatrixShape< 0, Detail::kLanesInQuad * Policy::kElementsPerAccess>; private: /// Storage type for accessing memory using AccessType = AlignedArray<Element, kAccessSize>; // // Data members // /// Internal pointer to memory AccessType *pointer_; /// Internal layout object Layout layout_; /// Guard to indicate whether the shape is divisible bool divisible_; /// Extent of the output tensor MatrixCoord extent_; /// Thread offset MatrixCoord thread_offset_; public: /// Default constructor CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical(): pointer_(nullptr) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical( TensorRef const &ref, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0]), divisible_(true), extent_(WarpShape::kM, WarpShape::kN) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); thread_offset_ = { quad_id, lane_in_quad * Policy::kElementsPerAccess }; pointer_ += layout_({thread_offset_.row(), thread_offset_.column()}); } /// Constructor from TensorRef CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical( TensorRef const &ref, TensorCoord const &extent, unsigned lane_id ): pointer_(reinterpret_cast<AccessType *>(ref.data())), layout_(ref.stride()[0]), divisible_(false), extent_(extent) { int quad_id = (lane_id / Detail::kLanesInQuad); int lane_in_quad = (lane_id % Detail::kLanesInQuad); thread_offset_ = { quad_id, lane_in_quad * Policy::kElementsPerAccess }; pointer_ += layout_({thread_offset_.row(), thread_offset_.column()}); } /// Adds a pointer offset CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical & add_pointer_offset(Index pointer_offset) { pointer_ += pointer_offset; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical & add_tile_offset(TensorCoord const &tile_offset) { MatrixCoord coord_offset( tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn ); thread_offset_ += coord_offset; pointer_ += layout_({ coord_offset.row(), coord_offset.column() }); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } /// Store CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { CUTLASS_PRAGMA_UNROLL for (int a = 0; a < kAccessCount; ++a) { int ptr_idx = n * Detail::kLanesInQuad * kAccessCount + pointer_offset + a; int frag_idx = n * kAccessCount + a; int col = thread_offset_.column() + n * Detail::kLanesInQuad * Policy::kElementsPerAccess + a; if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) { pointer_[ptr_idx] = frag_ptr[frag_idx]; } } } } /// Store CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Load CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { AccessType 
*frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) { CUTLASS_PRAGMA_UNROLL for (int a = 0; a < kAccessCount; ++a) { int ptr_idx = n * Detail::kLanesInQuad * kAccessCount + pointer_offset + a; int frag_idx = n * kAccessCount + a; int col = thread_offset_.column() + n * Detail::kLanesInQuad * Policy::kElementsPerAccess + a; if (divisible_ || (thread_offset_.row() < extent_.row() && col < extent_.column())) { frag_ptr[frag_idx] = pointer_[ptr_idx]; } } } } /// Load CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } CUTLASS_HOST_DEVICE TileIteratorTensorOpCanonical & operator++() { return add_tile_offset({1, 0}); } /// Set smem base address CUTLASS_HOST_DEVICE void set_smem_base_address(Index address) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/epilogue/warp/tile_iterator_tensor_op.h/0
{ "file_path": "include/cutlass/epilogue/warp/tile_iterator_tensor_op.h", "repo_id": "include", "token_count": 6931 }
27
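The RowMajor TileIteratorTensorOp specialization above maps each lane to a (quad, lane-in-quad) pair and advances its pointer in units of AccessType. The short sketch below shows where every lane of a warp lands in the shared-memory tile under that mapping; kElementsPerAccess is an assumption standing in for Policy::kElementsPerAccess.

// Sketch of the lane -> (row, column) mapping used by the RowMajor constructor:
// the quad id selects the row, the lane-in-quad selects a vector of columns.
#include <cstdio>

int main() {
  int const kLanesInQuad = 4;        // Detail::kLanesInQuad
  int const kElementsPerAccess = 2;  // assumed Policy::kElementsPerAccess

  for (int lane_id = 0; lane_id < 32; ++lane_id) {
    int quad_id      = lane_id / kLanesInQuad;
    int lane_in_quad = lane_id % kLanesInQuad;
    int row    = quad_id;
    int column = lane_in_quad * kElementsPerAccess;
    std::printf("lane %2d -> row %d, columns [%d, %d)\n",
                lane_id, row, column, column + kElementsPerAccess);
  }
  return 0;
}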
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cute/algorithm/clear.hpp" #include "cute/tensor.hpp" ////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////FP8 Accumulation/////////////////////////// ////////////////////////////////////////////////////////////////////////////// /// It would promote (add) the results from the tensor core accumulators to the /// main accumulators when the number of MMAs reaches the max number of MMA /// interval specified by user, after that the tensor core accumulators are /// zeroed. ////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::collective { template < class EngineAccum, class LayoutAccum> struct GmmaFP8Accumulation { using TensorAccum = cute::Tensor<EngineAccum, LayoutAccum>; static_assert(is_static<LayoutAccum>::value, "Accumulator Layout should be static"); static_assert(is_rmem<TensorAccum>::value , "Accumulator tensor must be rmem resident."); private: TensorAccum& accum_; TensorAccum accum_temp_; uint32_t accum_promotion_interval_; // defines the max num of executed MMAs after which accum should be promoted. uint32_t mma_count_per_mainloop_iteration_; // num of MMAs per k_tile of mainloop uint32_t mma_count_; // current executed MMAs uint32_t reset_accum_flag_; // accum needs to be zeroed or not. 
CUTLASS_DEVICE void promote_core() { warpgroup_wait<0>(); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < size(accum_); ++i) { accum_(i) += accum_temp_(i); } } public: CUTLASS_DEVICE GmmaFP8Accumulation( TensorAccum &accum, uint32_t accum_promotion_interval, uint32_t mma_count_per_mainloop_iteration) : accum_(accum), accum_promotion_interval_(accum_promotion_interval), mma_count_per_mainloop_iteration_(mma_count_per_mainloop_iteration), mma_count_(0), reset_accum_flag_(0) { accum_temp_ = cute::make_fragment_like(accum); } CUTLASS_DEVICE TensorAccum& operator()() { return accum_temp_; } /// prepare the MMA accumulators when initialization or zeroing is required. CUTLASS_DEVICE bool prepare_if_needed() { return reset_accum_flag_; } /// promote (add) the results from the MMA accumulators to main accumulator if needed. CUTLASS_DEVICE void promote_if_needed() { mma_count_ += mma_count_per_mainloop_iteration_; reset_accum_flag_ = __shfl_sync(0xffffffff, mma_count_ == accum_promotion_interval_, 0); if (reset_accum_flag_) { promote_core(); mma_count_ = 0; } } /// promote (add) the residue results from the MMA accumulators to main accumulator if needed. CUTLASS_DEVICE void promote_residue_if_needed() { if (__shfl_sync(0xffffffff, mma_count_ > 0, 0)) { promote_core(); } } }; } // namespace cutlass::gemm::collective
include/cutlass/gemm/collective/fp8_accumulation.hpp/0
{ "file_path": "include/cutlass/gemm/collective/fp8_accumulation.hpp", "repo_id": "include", "token_count": 1530 }
28
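GmmaFP8Accumulation promotes the temporary tensor-core accumulators into the main accumulators every accum_promotion_interval MMAs, with a final residue promotion after the mainloop. The plain host sketch below mimics that bookkeeping with scalar floats so the promotion points are easy to see; the interval, per-iteration MMA count, and k-tile count are illustrative assumptions.

// Plain C++ sketch of the promotion bookkeeping: the temporary accumulator is folded
// into the main one every `interval` MMAs, plus one residue promotion at the end.
#include <cstdio>

int main() {
  int const interval = 4;           // accum_promotion_interval_
  int const mma_per_iteration = 2;  // mma_count_per_mainloop_iteration_
  int const k_tiles = 5;            // assumed number of mainloop iterations

  float accum = 0.f, accum_temp = 0.f;
  int mma_count = 0;

  for (int k = 0; k < k_tiles; ++k) {
    accum_temp += 1.f;               // stands in for the warpgroup MMA results
    mma_count += mma_per_iteration;  // promote_if_needed()
    if (mma_count == interval) {
      accum += accum_temp;           // promote_core()
      accum_temp = 0.f;              // mainloop re-zeroes the MMA accumulators (prepare_if_needed)
      mma_count = 0;
      std::printf("promoted after k-tile %d\n", k);
    }
  }
  if (mma_count > 0) {               // promote_residue_if_needed()
    accum += accum_temp;
    std::printf("residue promoted\n");
  }
  std::printf("main accumulator: %f\n", accum);
  return 0;
}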
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level Rank2K definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/blas3.h" #include "cutlass/layout/matrix.h" #include "cutlass/arch/wmma.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/rank_2k_universal.h" #include "cutlass/gemm/threadblock/default_mma_core_sm75.h" #include "cutlass/gemm/threadblock/default_mma_core_sm70.h" #include "cutlass/gemm/threadblock/default_mma_core_sm80.h" #include "cutlass/gemm/threadblock/default_mma.h" #include "cutlass/gemm/threadblock/default_mma_core_simt.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_simt.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #if defined(CUTLASS_ARCH_WMMA_ENABLED) #include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h" #endif //CUTLASS_ARCH_WMMA_ENABLED //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { //////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC_, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Blas3 computation mode BlasMode BlasMode_ = BlasMode::kSymmetric> struct DefaultRank2K; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Hopper Architecture template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of A matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, 
/// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator> struct DefaultRank2K< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator> { /// Define the threadblock-scoped matrix multiply-accumulate (A x BT) using Mma1 = typename cutlass::gemm::threadblock::DefaultMma< ElementA, LayoutA, kAlignmentA, ElementB, typename layout::LayoutTranspose<LayoutB>::type, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; /// Define the threadblock-scoped matrix multiply-accumulate (B x AT) using Mma2 = typename cutlass::gemm::threadblock::DefaultMma< ElementB, LayoutB, kAlignmentB, ElementA, typename layout::LayoutTranspose<LayoutA>::type, kAlignmentA, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3< ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue; /// Define the kernel-level Rank2K operator. 
using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, BlasMode::kSymmetric>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Ampere Architecture template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of A matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator> struct DefaultRank2K< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator> { /// Define the threadblock-scoped matrix multiply-accumulate (A x BT) using Mma1 = typename cutlass::gemm::threadblock::DefaultMma< ElementA, LayoutA, kAlignmentA, ElementB, typename layout::LayoutTranspose<LayoutB>::type, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; /// Define the threadblock-scoped matrix multiply-accumulate (B x AT) using Mma2 = typename cutlass::gemm::threadblock::DefaultMma< ElementB, LayoutB, kAlignmentB, ElementA, typename layout::LayoutTranspose<LayoutA>::type, kAlignmentA, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3< ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue; /// Define the kernel-level Rank2K operator. using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, BlasMode::kSymmetric>; }; //////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
include/cutlass/gemm/kernel/default_rank_2k.h/0
{ "file_path": "include/cutlass/gemm/kernel/default_rank_2k.h", "repo_id": "include", "token_count": 3908 }
29
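DefaultRank2K composes two threadblock-scoped MMAs (A x B^T and B x A^T) with a BLAS3 epilogue that writes one triangular half of C. The naive host sketch below computes the same rank-2k update for the lower fill mode on small, illustrative matrices; it is a reference for what the composed kernel produces, not how it produces it.

// Naive host rank-2k reference: C := alpha * (A * B^T + B * A^T) + beta * C,
// writing only the lower triangle (FillMode::kLower). Dimensions are assumptions.
#include <cstdio>
#include <vector>

int main() {
  int const n = 4, k = 3;
  float alpha = 1.f, beta = 0.f;
  std::vector<float> A(n * k), B(n * k), C(n * n, 0.f);
  for (int i = 0; i < n * k; ++i) { A[i] = float(i); B[i] = float(2 * i); }

  for (int row = 0; row < n; ++row) {
    for (int col = 0; col <= row; ++col) {           // lower triangle only
      float acc = 0.f;
      for (int kk = 0; kk < k; ++kk) {
        acc += A[row * k + kk] * B[col * k + kk];    // contribution of A * B^T
        acc += B[row * k + kk] * A[col * k + kk];    // contribution of B * A^T
      }
      C[row * n + col] = alpha * acc + beta * C[row * n + col];
    }
  }

  for (int row = 0; row < n; ++row) {
    for (int col = 0; col < n; ++col) std::printf("%6.1f ", C[row * n + col]);
    std::printf("\n");
  }
  return 0;
}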
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/blas3.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/semaphore.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function FillMode FillModeC_ ///! 
Fill Mode for C (kLower or kUpper) > struct RankKUniversal { public: using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; static FillMode const kFillModeC = FillModeC_; static ComplexTransform const kTransformA = Mma::kTransformA; static ComplexTransform const kTransformB = Mma::kTransformB; using Operator = typename Mma::Operator; using OperatorClass = typename Mma::Operator::OperatorClass; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename Mma::Operator::Shape; using InstructionShape = typename Mma::Policy::Operator::InstructionShape; using ArchTag = typename Mma::ArchTag; static int const kStages = Mma::kStages; static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Split-K preserves splits that are 128b aligned static int const kSplitKAlignment = 128 / sizeof_bits<ElementA>::value; // // Structures // /// Argument structure struct Arguments { // // Data members // GemmUniversalMode mode{GemmUniversalMode::kGemm}; GemmCoord problem_size{}; int batch_count{1}; typename EpilogueOutputOp::Params epilogue{}; void const * ptr_A{nullptr}; void const * ptr_C{nullptr}; void * ptr_D{nullptr}; int64_t batch_stride_A{0}; int64_t batch_stride_C{0}; int64_t batch_stride_D{0}; typename LayoutA::Stride::Index lda{}; typename LayoutB::Stride::Index ldb{}; typename LayoutC::Stride::Index ldc{}; typename LayoutC::Stride::Index ldd{}; bool allow_early_exit{false}; // // Methods // Arguments() = default; /// constructs an arguments structure Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_count, typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_C, void * ptr_D, int64_t batch_stride_A, int64_t batch_stride_C, int64_t batch_stride_D, typename LayoutA::Stride::Index lda, typename LayoutC::Stride::Index ldc, typename LayoutC::Stride::Index ldd, bool allow_early_exit = false ): mode(mode), problem_size(problem_size), batch_count(batch_count), epilogue(epilogue), ptr_A(ptr_A), ptr_C(ptr_C), ptr_D(ptr_D), batch_stride_A(batch_stride_A), batch_stride_C(batch_stride_C), batch_stride_D(batch_stride_D), lda(lda), ldb(0), ldc(ldc), ldd(ldd), allow_early_exit(allow_early_exit) { } }; // // Structure for precomputing values in host memory and passing to kernels // /// Parameters structure struct Params { cutlass::gemm::GemmCoord problem_size{}; cutlass::gemm::GemmCoord grid_tiled_shape{}; int swizzle_log_tile{0}; typename Mma::IteratorA::Params params_A{}; typename Mma::IteratorB::Params params_B{}; typename Epilogue::OutputTileIterator::Params params_C{}; typename Epilogue::OutputTileIterator::Params params_D{}; typename EpilogueOutputOp::Params output_op{}; GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm; int batch_count{0}; int gemm_k_size{0}; void * ptr_A{nullptr}; void * ptr_B{nullptr}; void * ptr_C{nullptr}; 
void * ptr_D{nullptr}; int64_t batch_stride_A{0}; int64_t batch_stride_B{0}; int64_t batch_stride_C{0}; int64_t batch_stride_D{0}; int *semaphore{nullptr}; bool allow_early_exit{false}; // // Methods // Params() = default; CUTLASS_HOST_DEVICE Params( Arguments const &args, cutlass::gemm::GemmCoord const & grid_tiled_shape, int gemm_k_size, void *workspace = nullptr ): problem_size(args.problem_size), grid_tiled_shape(grid_tiled_shape), swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), params_A(args.lda), params_B(args.lda), params_C(args.ldc), params_D(args.ldd), output_op(args.epilogue), mode(args.mode), batch_count(args.batch_count), gemm_k_size(gemm_k_size), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_A)), ptr_C(const_cast<void *>(args.ptr_C)), ptr_D(const_cast<void *>(args.ptr_D)), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_A), batch_stride_C(args.batch_stride_C), batch_stride_D(args.batch_stride_D), semaphore(static_cast<int *>(workspace)), allow_early_exit(args.allow_early_exit) { } CUTLASS_HOST_DEVICE void update( Arguments const &args, void *workspace = nullptr) { ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_A); ptr_C = const_cast<void *>(args.ptr_C); ptr_D = args.ptr_D; output_op = args.epilogue; semaphore = static_cast<int *>(workspace); } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; public: // // Methods // CUTLASS_DEVICE RankKUniversal() { } /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size) { static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { return Status::kErrorMisalignedOperand; } return Status::kSuccess; } static Status can_implement(Arguments const &args) { return can_implement(args.problem_size); } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit following LAPACK's definition if (params.allow_early_exit && (params.output_op.alpha == ElementC(0)) && (params.output_op.beta == ElementC(1))) { return; } // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } // Early exit if Fill Mode is Lower and // if the entire tile is above the main diagonal (bottom-left corner is at or above the diagonal) if (kFillModeC == cutlass::FillMode::kLower && (threadblock_tile_offset.m() + 1) * Mma::Shape::kM <= threadblock_tile_offset.n() * Mma::Shape::kN) { return; } // Early exit if Fill Mode is Upper and // if the entire tile is below the main diagonal (top-right corner is at or below the diagonal) if (kFillModeC == cutlass::FillMode::kUpper && threadblock_tile_offset.m() * Mma::Shape::kM >= (threadblock_tile_offset.n() + 1) * Mma::Shape::kN) { return; } bool 
tile_on_diagonal = false; // Mark tiles that are being crossed by the main diagonal // (top-right and bottom-left corners are on either side of the diagonal) if ((threadblock_tile_offset.m() + 1) * Mma::Shape::kM > threadblock_tile_offset.n() * Mma::Shape::kN && threadblock_tile_offset.m() * Mma::Shape::kM < (threadblock_tile_offset.n() + 1) * Mma::Shape::kN) { tile_on_diagonal = true; } int offset_k = 0; int problem_size_k = params.problem_size.k(); ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // // Fetch pointers based on mode. // if (params.mode == GemmUniversalMode::kGemm || params.mode == GemmUniversalMode::kGemmSplitKParallel) { if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; } offset_k = threadblock_tile_offset.k() * params.gemm_k_size; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; } else if (params.mode == GemmUniversalMode::kArray) { ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()]; ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()]; } __syncthreads(); // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, offset_k, }; cutlass::MatrixCoord tb_offset_B{ offset_k, threadblock_tile_offset.n() * Mma::Shape::kN }; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.params_A, ptr_A, {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( params.params_B, ptr_B, {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; // Compute threadblock-scoped matrix multiply-add mma( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.n() * Mma::Shape::kN ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); // // Fetch pointers based on mode. // // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); if (params.mode == GemmUniversalMode::kGemm) { // If performing a reduction via split-K, fetch the initial synchronization if (params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. 
semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } } else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kArray) { ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()]; ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()]; } // If CTA not on diagonal, FillMode doesn't apply. FillMode kFillModeCTA = tile_on_diagonal ? kFillModeC : FillMode::kNone; // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( params.params_C, ptr_C, params.problem_size.mn(), thread_idx, threadblock_offset, kFillModeCTA ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset, kFillModeCTA ); Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_offset.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_offset.k()); __threadfence(); } // Execute the epilogue operator to update the destination tensor. epilogue( output_op, iterator_D, accumulators, iterator_C); // // Release the semaphore // if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/gemm/kernel/rank_k_universal.h/0
{ "file_path": "include/cutlass/gemm/kernel/rank_k_universal.h", "repo_id": "include", "token_count": 6750 }
30
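The triangular early-exit and diagonal tests in RankKUniversal::operator() are the part of the kernel most specific to rank-k updates, so a small standalone restatement follows. It mirrors the conditions used above, but as plain host code; names such as classify_tile and TileDecision are hypothetical and exist only for illustration.

// Standalone sketch (not part of rank_k_universal.h): a CTA covering rows
// [m*TileM, (m+1)*TileM) and columns [n*TileN, (n+1)*TileN) is skipped when the
// whole tile lies in the unused triangle, and is flagged as "on the diagonal"
// when the main diagonal crosses it, in which case the epilogue must mask.
#include <cstdio>

enum class FillMode { kLower, kUpper };

struct TileDecision {
  bool skip;         // entire tile is in the unused triangle
  bool on_diagonal;  // diagonal crosses the tile
};

TileDecision classify_tile(int m, int n, int TileM, int TileN, FillMode fill) {
  bool skip = false;
  if (fill == FillMode::kLower) {
    // Entire tile above the diagonal: bottom-left corner at or above it.
    skip = (m + 1) * TileM <= n * TileN;
  } else {
    // Entire tile below the diagonal: top-right corner at or below it.
    skip = m * TileM >= (n + 1) * TileN;
  }
  // Diagonal crosses the tile when its corners straddle the diagonal.
  bool on_diagonal = ((m + 1) * TileM > n * TileN) && (m * TileM < (n + 1) * TileN);
  return {skip, on_diagonal};
}

int main() {
  // Example: 128x128 tiles, lower-fill update. Tiles strictly above the
  // diagonal report skip=1; tiles touched by the diagonal report on_diagonal=1.
  for (int m = 0; m < 3; ++m) {
    for (int n = 0; n < 3; ++n) {
      TileDecision d = classify_tile(m, n, 128, 128, FillMode::kLower);
      std::printf("tile(%d,%d): skip=%d on_diagonal=%d\n", m, n, d.skip, d.on_diagonal);
    }
  }
  return 0;
}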
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/fast_math.h" #include "cutlass/gemm_coord.hpp" #include "cutlass/kernel_hardware_info.hpp" #include "cutlass/gemm/kernel/tile_scheduler_params.h" #include "cute/layout.hpp" #include "cute/tensor.hpp" #include "cute/arch/cluster_sm90.hpp" #include "cutlass/pipeline/pipeline.hpp" namespace cutlass::gemm::kernel::detail { /////////////////////////////////////////////////////////////////////////////// // Users are not supposed to use this class directly. // This is a CRTP base class for the actual tile schedulers. 
template<class Subclass> class StaticPersistentTileScheduler { // // Data members // private: uint64_t current_work_linear_idx_; uint64_t total_grid_size_; public: struct WorkTileInfo { int32_t M_idx = 0; int32_t N_idx = 0; int32_t L_idx = 0; bool is_valid_tile = false; CUTLASS_HOST_DEVICE bool is_valid() const { return is_valid_tile; } CUTLASS_HOST_DEVICE static WorkTileInfo invalid_work_tile() { return {-1, -1, -1, false}; } CUTLASS_HOST_DEVICE bool is_final_split(uint32_t k_tiles_per_output_tile) const { return true; } CUTLASS_HOST_DEVICE int32_t reduction_subtile_idx() const { return -1; } }; using Params = PersistentTileSchedulerSm90Params; using RasterOrder = typename Params::RasterOrder; using RasterOrderOptions = typename Params::RasterOrderOptions; public: struct Arguments { int max_swizzle_size = 1; RasterOrderOptions raster_order = RasterOrderOptions::Heuristic; }; template <class ProblemShapeMNKL, class TileShape, class ClusterShape> static Params to_underlying_arguments( ProblemShapeMNKL problem_shape_mnkl, TileShape tile_shape, ClusterShape cluster_shape, [[maybe_unused]] KernelHardwareInfo const& hw_info, Arguments const& arguments, [[maybe_unused]] void* workspace=nullptr, [[maybe_unused]] const uint32_t epilogue_subtile = 1) { // We only need the tile and cluster shape during scheduler setup, so let FTAD do the magic static_assert(cute::is_static<TileShape>::value); static_assert(cute::is_static<ClusterShape>::value); dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, tile_shape, cluster_shape); Params params; params.initialize( problem_blocks, to_gemm_coord(cluster_shape), hw_info, arguments.max_swizzle_size, arguments.raster_order ); return params; } CUTLASS_HOST_DEVICE static bool can_implement(Arguments const& args) { return args.max_swizzle_size >= 1; } CUTLASS_HOST_DEVICE StaticPersistentTileScheduler() { } CUTLASS_DEVICE explicit StaticPersistentTileScheduler(Params const& params_) : scheduler_params(params_) { // MSVC requires protecting use of CUDA-specific nonstandard syntax, // like blockIdx and gridDim, with __CUDA_ARCH__. 
#if defined(__CUDA_ARCH__) if (params_.raster_order_ == RasterOrder::AlongN) { current_work_linear_idx_ = uint64_t(blockIdx.x) + uint64_t(blockIdx.y) * uint64_t(gridDim.x); } else { current_work_linear_idx_ = uint64_t(blockIdx.x) * uint64_t(gridDim.y) + uint64_t(blockIdx.y); } total_grid_size_ = uint64_t(gridDim.x) * uint64_t(gridDim.y) * uint64_t(gridDim.z); #else CUTLASS_ASSERT(false && "This line should never be reached"); #endif } // Returns the initial work tile info that will be computed over template <class ClusterShape> CUTLASS_DEVICE WorkTileInfo initial_work_tile_info(ClusterShape cluster_shape) { return get_current_work(); } CUTLASS_DEVICE WorkTileInfo get_current_work() const { return get_current_work_for_linear_idx(current_work_linear_idx_); } CUTLASS_DEVICE WorkTileInfo get_current_work_for_linear_idx(uint64_t linear_idx) const { if (linear_idx >= scheduler_params.blocks_per_problem_) { return WorkTileInfo::invalid_work_tile(); } // Map worker's linear index into the CTA tiled problem shape to the corresponding MNL indices uint64_t work_idx_l, remainder; scheduler_params.divmod_batch_(work_idx_l, remainder, linear_idx); uint64_t blk_per_grid_dim = scheduler_params.divmod_cluster_shape_minor_.divide(remainder); auto [work_idx_m, work_idx_n] = Subclass::get_work_idx_m_and_n(blk_per_grid_dim, scheduler_params.divmod_cluster_shape_major_, scheduler_params.divmod_cluster_shape_minor_, scheduler_params.divmod_cluster_blk_major_, scheduler_params.log_swizzle_size_, scheduler_params.raster_order_); return {work_idx_m, work_idx_n, static_cast<int32_t>(work_idx_l), true}; } CUTLASS_DEVICE void advance_to_next_work(uint32_t advance_count = 1) { current_work_linear_idx_ += total_grid_size_ * uint64_t(advance_count); } // Computes the linear index within a batch given M and N tile offsets within the batch. 
// This essentially inverts the mapping performed in get_work_idx_m_and_n static CUTLASS_DEVICE uint64_t get_linear_idx_from_m_and_n( int32_t tile_m, int32_t tile_n, FastDivmodU64Pow2 const& divmod_cluster_shape_major, FastDivmodU64Pow2 const& divmod_cluster_shape_minor, FastDivmodU64 const& divmod_cluster_blk_major, int32_t log_swizzle_size, RasterOrder raster_order) { uint64_t minor_work_idx, major_work_idx, cluster_minor_offset; if (raster_order == RasterOrder::AlongN) { minor_work_idx = static_cast<uint64_t>(tile_m); major_work_idx = static_cast<uint64_t>(tile_n); uint64_t cluster_m = divmod_cluster_shape_minor.divide(tile_m) * divmod_cluster_shape_minor.divisor; cluster_minor_offset = tile_m - cluster_m; } else { major_work_idx = static_cast<uint64_t>(tile_m); minor_work_idx = static_cast<uint64_t>(tile_n); uint64_t cluster_n = divmod_cluster_shape_minor.divide(tile_n) * divmod_cluster_shape_minor.divisor; cluster_minor_offset = tile_n - cluster_n; } uint64_t cluster_idx_minor, cluster_idx_major, cluster_major_offset; cluster_idx_minor = divmod_cluster_shape_minor.divide(minor_work_idx - cluster_minor_offset); divmod_cluster_shape_major(cluster_idx_major, cluster_major_offset, major_work_idx); uint64_t cluster_idx_minor_div_swizzle = cluster_idx_minor >> log_swizzle_size; uint64_t offset = cluster_idx_minor & ((1 << log_swizzle_size) - 1); uint64_t extra = cluster_idx_minor_div_swizzle * divmod_cluster_blk_major.divisor + cluster_idx_major; uint64_t cluster_id = (extra << log_swizzle_size) | offset; return (cluster_id * divmod_cluster_shape_major.divisor + cluster_major_offset) * divmod_cluster_shape_minor.divisor + cluster_minor_offset; } // Given the inputs, computes the total number of output blocks over which this problem will compute. // Note that this is only the logical size of our grid, not the physical grid we will actually launch. template<class ProblemShapeMNKL, class BlockShape, class ClusterShape> CUTLASS_HOST_DEVICE static dim3 get_tiled_cta_shape_mnl(ProblemShapeMNKL problem_shape_mnkl, BlockShape cta_shape, ClusterShape cluster_shape) { auto cta_m = cute::size(cute::ceil_div(cute::shape<0>(problem_shape_mnkl), cute::shape<0>(cta_shape))); auto cta_n = cute::size(cute::ceil_div(cute::shape<1>(problem_shape_mnkl), cute::shape<1>(cta_shape))); return Params::get_tiled_cta_shape_mnl( to_gemm_coord(problem_shape_mnkl), to_gemm_coord(cluster_shape), cta_m, cta_n ); } CUTLASS_DEVICE static auto work_tile_to_cta_coord(WorkTileInfo work_tile_info) { // Get every cta coord in three dimensions of the cluster auto [cta_m_in_cluster, cta_n_in_cluster, cta_l_in_cluster] = cute::block_id_in_cluster(); return make_coord( work_tile_info.M_idx + static_cast<int32_t>(cta_m_in_cluster), work_tile_info.N_idx + static_cast<int32_t>(cta_n_in_cluster), _, work_tile_info.L_idx + static_cast<int32_t>(cta_l_in_cluster) ); } // Given the inputs, computes the physical grid we should launch. 
template<class ProblemShapeMNKL, class BlockShape, class ClusterShape> CUTLASS_HOST_DEVICE static dim3 get_grid_shape( ProblemShapeMNKL problem_shape_mnk, BlockShape cta_shape, ClusterShape cluster_shape, KernelHardwareInfo hw_info, Arguments arguments, bool truncate_by_problem_size=true) { auto problem_shape_mnkl = cute::append<4>(problem_shape_mnk, cute::Int<1>{}); dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, cta_shape, cluster_shape); return Params::get_grid_shape( problem_blocks, to_gemm_coord(cluster_shape), hw_info, arguments.max_swizzle_size, arguments.raster_order, /* truncate_by_problem_size = */true ); } // Given the inputs, computes the physical grid we should launch. template<class ProblemShapeMNKL, class BlockShape, class ClusterShape> CUTLASS_HOST_DEVICE static dim3 get_grid_shape( Params const& params, ProblemShapeMNKL problem_shape_mnk, BlockShape cta_shape, ClusterShape cluster_shape, KernelHardwareInfo hw_info) { auto problem_shape_mnkl = cute::append<4>(problem_shape_mnk, cute::Int<1>{}); dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape_mnkl, cta_shape, cluster_shape); Arguments args{}; if constexpr (!std::is_const_v<decltype(args.max_swizzle_size)>) { args.max_swizzle_size = 1 << params.log_swizzle_size_; } args.raster_order = params.raster_order_ == RasterOrder::AlongN ? RasterOrderOptions::AlongN : RasterOrderOptions::AlongM; return Params::get_grid_shape( problem_blocks, to_gemm_coord(cluster_shape), hw_info, args.max_swizzle_size, args.raster_order, /* truncate_by_problem_size = */true ); } // Convert CTA-level work tile info to cluster-level tile coord CUTLASS_DEVICE auto work_tile_to_cluster_coord_mnkl(WorkTileInfo work_tile_info) const { // TileScheduler works at CTA-level, kernel works at cluster-level int m_coord = idx2crd(work_tile_info.M_idx / scheduler_params.cluster_shape_m_, scheduler_params.problem_tiles_m_); int n_coord = idx2crd(work_tile_info.N_idx / scheduler_params.cluster_shape_n_, scheduler_params.problem_tiles_n_); int l_coord = idx2crd(work_tile_info.L_idx, scheduler_params.problem_tiles_l_); return make_coord(m_coord, n_coord, _, l_coord); } // Returns whether the block assigned this work should compute the epilogue for the corresponding // output tile. For the basic tile scheduler, this is always true. CUTLASS_HOST_DEVICE static bool compute_epilogue(WorkTileInfo const&, Params const&) { return true; } CUTLASS_HOST_DEVICE static bool compute_epilogue(WorkTileInfo const&) { return true; } // Performs the reduction across splits for a given output tile. Since this scheduler does // not split output tiles, no reduction is needed. template <class FrgTensorC> CUTLASS_DEVICE static void fixup(Params const&, WorkTileInfo const&, FrgTensorC&, uint32_t, uint32_t) {} // Performs the reduction across splits for a given output tile. No fixup is required for // work units returned by this scheduler. template <class FrgTensorC> CUTLASS_DEVICE void fixup(WorkTileInfo const&, FrgTensorC&, uint32_t, uint32_t) const { } // Returns whether the current WorkTileInfo passed in should continue to be used. Since // this scheduler only schedules work in units of single, full output tiles, the WorkTileInfo // passed in should not be used after having been processed. 
CUTLASS_DEVICE static bool continue_current_work(WorkTileInfo&) { return false; } template <class ProblemShape, class TileShape> CUTLASS_HOST_DEVICE static int get_work_k_tile_count(WorkTileInfo const& work_tile_info, ProblemShape problem_shape, TileShape tile_shape) { // All work units returned by this scheduler cover the entire K iteration // space of the output tile assigned to the work unit. return cute::size(cute::ceil_div(cute::get<2>(problem_shape), cute::get<2>(tile_shape))); } CUTLASS_HOST_DEVICE static uint32_t get_work_k_tile_start(WorkTileInfo const&) { // All work units returned by this scheduler start from K tile 0 return 0u; } CUTLASS_DEVICE static bool need_separate_reduction(Params const& params) { return false; } CUTLASS_DEVICE bool is_work_tile_for_reduction(WorkTileInfo const& work_tile_info, Params const& params) { return false; } template <class FrgTensorC> CUTLASS_DEVICE void separate_reduction( Params const& params, WorkTileInfo const& work_tile_info, FrgTensorC& accumulators, uint32_t num_barriers, uint32_t barrier_idx) { } // Shares the accumulator set with peers in the global workspace template <class FrgTensorC> CUTLASS_DEVICE static void share( Params const& params, WorkTileInfo const& work_tile_info, FrgTensorC& accumulators, uint32_t num_barriers, uint32_t barrier_idx) { } CUTLASS_DEVICE static bool valid_warpgroup_in_work_tile(WorkTileInfo const& work_tile_info) { return true; } CUTLASS_DEVICE static bool requires_separate_reduction(Params const& params) { return false; } public: // Sink scheduler params as a member Params scheduler_params; }; } // namespace cutlass::gemm::kernel::detail
include/cutlass/gemm/kernel/static_tile_scheduler.hpp/0
{ "file_path": "include/cutlass/gemm/kernel/static_tile_scheduler.hpp", "repo_id": "include", "token_count": 6056 }
31
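As a companion to StaticPersistentTileScheduler, here is a deliberately simplified standalone sketch of the decomposition a persistent scheduler performs: each CTA starts at its block index, strides by the grid size, and splits the linear index into (m, n, l) tile coordinates. Swizzling, cluster shapes, and raster order are omitted, and every name below is an illustrative assumption rather than scheduler API.

// Standalone sketch (not the scheduler's actual implementation): reduce the
// linear-index-to-tile mapping to plain integer divmod, batch (L) first, then
// N, with M fastest varying.
#include <cstdint>
#include <cstdio>

struct WorkTile { int32_t m, n, l; bool valid; };

WorkTile tile_for_linear_idx(uint64_t linear_idx,
                             uint64_t tiles_m, uint64_t tiles_n, uint64_t tiles_l) {
  uint64_t total = tiles_m * tiles_n * tiles_l;
  if (linear_idx >= total) {
    return {-1, -1, -1, false};           // no more work for this CTA
  }
  uint64_t per_batch = tiles_m * tiles_n;
  uint64_t l = linear_idx / per_batch;    // batch (L) index first
  uint64_t r = linear_idx % per_batch;
  uint64_t n = r / tiles_m;               // then N, with M fastest varying
  uint64_t m = r % tiles_m;
  return {int32_t(m), int32_t(n), int32_t(l), true};
}

int main() {
  // A CTA with block index 5 in a grid of 64 persistent CTAs walks an 8x8x2
  // tile space at indices 5, 69, 133, ... until the index runs past the end.
  uint64_t grid_size = 64;
  for (uint64_t idx = 5; ; idx += grid_size) {
    WorkTile w = tile_for_linear_idx(idx, 8, 8, 2);
    if (!w.valid) break;
    std::printf("linear %llu -> (m=%d, n=%d, l=%d)\n",
                static_cast<unsigned long long>(idx), w.m, w.n, w.l);
  }
  return 0;
}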
/***************************************************************************************************
 * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
/*! \file
    \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
      layout of the global memory fragments, data types, and internal tile sizes.

      Partial specializations for threadblock::Mma operations targeting TensorOp instructions.
      The SM80 multistage kernel requires the stage count to be at least 3 in order to use
      asynchronous copy.
*/ #pragma once #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/layout/tensor_op_multiplicand_sm80.h" #include "cutlass/gemm/warp/mma_simt_policy.h" #include "cutlass/gemm/warp/mma_simt.h" #include "cutlass/gemm/warp/default_mma_tensor_op.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" #include "cutlass/gemm/threadblock/default_mma_core.h" #include "cutlass/gemm/threadblock/default_multistage_mma_complex_core.h" #include "cutlass/gemm/threadblock/default_multistage_mma_complex_core_sm80.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h" #include "cutlass/gemm/threadblock/mma_multistage.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for double-precision /// /// A: column-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::ColumnMajor, double, layout::ColumnMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::ColumnMajor; using ElementB = double; using LayoutB = layout::ColumnMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 64; /// Default Operator using Operator = Operator_; // // Shared memory layouts // using SmemLayoutA = 
layout::ColumnMajorTensorOpMultiplicandCongruous64b; using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; // // Iterators to write to shared memory // /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; /// Partial specialization for double-precision /// /// A: column-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::ColumnMajor, double, layout::RowMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::ColumnMajor; using ElementB = double; using LayoutB = layout::RowMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = 
WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 64; /// Default Operator using Operator = Operator_; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous64b; // Shared memory layout using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpStripedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for double-precision /// /// A: row-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::RowMajor, double, layout::ColumnMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::RowMajor; using ElementB = double; using LayoutB = layout::ColumnMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM 
size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 64; /// Default Operator using Operator = Operator_; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise; using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicand64bCrosswise; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// /// Partial specialization for double-precision /// /// A: row-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::RowMajor, double, layout::RowMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::RowMajor; using ElementB = double; using LayoutB = layout::RowMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements 
static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 64; /// Default Operator using Operator = Operator_; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicand64bCrosswise; using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous64b; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpStripedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<16, 2>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for double-precision /// /// A: column-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::AffineRank2ColumnMajor, double, layout::AffineRank2ColumnMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::AffineRank2ColumnMajor; using ElementB = double; using LayoutB = layout::AffineRank2ColumnMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = 
cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::ColumnMajor, ElementC, LayoutC, arch::OpClassTensorOp, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; /// Partial specialization for double-precision /// /// A: column-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::AffineRank2ColumnMajor, double, layout::AffineRank2RowMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::AffineRank2ColumnMajor; using ElementB = double; using LayoutB = layout::AffineRank2RowMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementC, LayoutC, arch::OpClassTensorOp, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; 
//////////////////////////////////////////////////////////////////////////////// /// Partial specialization for double-precision /// /// A: row-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::AffineRank2RowMajor, double, layout::AffineRank2ColumnMajor, double, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::AffineRank2RowMajor; using ElementB = double; using LayoutB = layout::AffineRank2ColumnMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementC, LayoutC, arch::OpClassTensorOp, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; //////////////////////////////////////////////////////////////////////////////// /// /// Partial specialization for double-precision /// /// A: row-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, double, layout::AffineRank2RowMajor, double, layout::AffineRank2RowMajor, double, LayoutC_, arch::OpClassTensorOp, 
Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = double; using LayoutA = layout::AffineRank2RowMajor; using ElementB = double; using LayoutB = layout::AffineRank2RowMajor; using ElementC = double; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementC, LayoutC, arch::OpClassTensorOp, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for float-precision /// /// ElementA: complex<float> /// ElementB: complex<float> /// ElementC: complex<float> /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Layout for A operand typename LayoutA_, /// Layout for B operand typename LayoutB_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// per-element transformation for elements of A ComplexTransform TransformA_, /// per-element transformation for elements of B ComplexTransform TransformB_ > struct DefaultMmaCore< Shape_, WarpShape_, GemmShape<16, 8, 8>, complex<float>, LayoutA_, complex<float>, LayoutB_, complex<float>, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB, TransformA_, TransformB_, true> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = GemmShape<16, 8, 8>; using ElementA = complex<float>; using LayoutA = LayoutA_; using ElementB = complex<float>; using LayoutB = LayoutB_; using ElementC = complex<float>; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; static const ComplexTransform TransformA = TransformA_; static const ComplexTransform TransformB = TransformB_; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // 
Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; static_assert( platform::is_same<Operator, arch::OpMultiplyAddComplex>::value || platform::is_same<Operator, arch::OpMultiplyAddGaussianComplex>::value || platform::is_same<Operator, arch::OpMultiplyAddComplexFastF32>::value, "The operator tag must indicate complex multiplication."); // // Underlying template // using MmaComplexCore = DefaultMultistageMmaComplexCore< Shape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, arch::OpClassTensorOp, kStages, TransformA, TransformB, Operator, kCacheOpA, kCacheOpB >; // // Shared memory layouts // using SmemLayoutA = typename MmaComplexCore::SmemLayoutA; // Shared memory layout using SmemLayoutB = typename MmaComplexCore::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename MmaComplexCore::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename MmaComplexCore::SmemIteratorA; /// ThreadMap of iterator B using IteratorThreadMapB = typename MmaComplexCore::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename MmaComplexCore::SmemIteratorB; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename MmaComplexCore::MmaTensorOp; /// Policy used to define MmaPipelined using MmaPolicy = typename MmaComplexCore::MmaPolicy; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for double-precision /// /// ElementA: complex<double> /// ElementB: complex<double> /// ElementC: complex<double> /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout for A operand typename LayoutA_, /// Layout for B operand typename LayoutB_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// per-element transformation for elements of A ComplexTransform TransformA_, /// per-element transformation for elements of B ComplexTransform TransformB_ > struct DefaultMmaCore< Shape_, WarpShape_, InstructionShape_, complex<double>, LayoutA_, complex<double>, LayoutB_, complex<double>, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB, TransformA_, TransformB_, true> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = complex<double>; using LayoutA = LayoutA_; using 
ElementB = complex<double>; using LayoutB = LayoutB_; using ElementC = complex<double>; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; static const ComplexTransform TransformA = TransformA_; static const ComplexTransform TransformB = TransformB_; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); static_assert(WarpCount::kCount > 1, "This specialization requires at least two warps."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 64; /// Default Operator using Operator = Operator_; static_assert( platform::is_same<Operator, arch::OpMultiplyAddComplex>::value || platform::is_same<Operator, arch::OpMultiplyAddGaussianComplex>::value, "The operator tag must indicate complex multiplication."); // // Underlying template // using MmaComplexCore = DefaultMultistageMmaComplexCore< Shape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, arch::OpClassTensorOp, kStages, TransformA, TransformB, Operator, kCacheOpA, kCacheOpB >; // // Shared memory layouts // using SmemLayoutA = typename MmaComplexCore::SmemLayoutA; // Shared memory layout using SmemLayoutB = typename MmaComplexCore::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename MmaComplexCore::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename MmaComplexCore::SmemIteratorA; /// ThreadMap of iterator B using IteratorThreadMapB = typename MmaComplexCore::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename MmaComplexCore::SmemIteratorB; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename MmaComplexCore::MmaTensorOp; /// Policy used to define MmaPipelined using MmaPolicy = typename MmaComplexCore::MmaPolicy; }; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B 
cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = platform::min(Shape::kM / (kAccessSizeInBits / sizeof_bits<ElementA>::value), 8); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = platform::min(Shape::kN / (kAccessSizeInBits / sizeof_bits<ElementB>::value), 8); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // static int const Crosswise_A = platform::min(int(128 / sizeof(ElementA)), Shape::kM); using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<ElementA>::value, Crosswise_A>; // Shared memory layout static int const Crosswise_B = platform::min(int(128 / sizeof(ElementB)), Shape::kN); using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< sizeof_bits<ElementB>::value, Crosswise_B>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, 
ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::RowMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementB>::value); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementA>::value, Shape::kK>; // Shared memory layout using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementB>::value, Shape::kK>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared 
memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = platform::min(Shape::kM / (kAccessSizeInBits / sizeof_bits<ElementA>::value), 8); static int 
const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // static int const Crosswise_A = platform::min(int(128 / sizeof(ElementA)), Shape::kM); using SmemLayoutA = layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<ElementA>::value, Crosswise_A>; // Shared memory layout using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementB>::value, Shape::kK>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::RowMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC 
= LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = platform::min(Shape::kN / (kAccessSizeInBits / sizeof_bits<ElementB>::value), 8); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementA>::value, Shape::kK>; // Shared memory layout static int const Crosswise_B = platform::min(int(128 / sizeof(ElementB)), Shape::kN); using SmemLayoutB = layout::RowMajorTensorOpMultiplicandCongruous< sizeof_bits<ElementB>::value, Crosswise_B>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousA, kWarpThreadArrangementStridedA>, kAccessSizeInBits / sizeof_bits<ElementA>::value>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, layout::PitchLinearShape<kWarpThreadArrangementContiguousB, kWarpThreadArrangementStridedB>, kAccessSizeInBits / sizeof_bits<ElementB>::value>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major-interleaved /// B: row-major-interleaved /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes /// /// Column/RowMajorInterleved<InterleavedK>(m, n) is mapped to Column/RowMajor(m /// x InterleavedK, n / InterleavedK) so that Column/RowMajor global iterators /// can be 
reused. The shared store iterator is the same as the crosswise shared /// store iterator. So, the only thing we need to do is to swap the coordinates /// (contiguous <=> strided) used by the global iterator and the shared store /// iterator. template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by MMA typename Operator_, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Number of interleaved K int InterleavedK> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajorInterleaved<InterleavedK>, ElementB_, layout::RowMajorInterleaved<InterleavedK>, ElementC_, LayoutC_, arch::OpClassTensorOp, Stages, Operator_, AccumulatorsInRowMajor, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>; using ElementB = ElementB_; using LayoutB = layout::RowMajorInterleaved<InterleavedK>; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; static int const kInterleavedK = InterleavedK; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kElementsPerAccess = kAccessSizeInBits / sizeof_bits<ElementA>::value; static int const kWarpThreadArrangementContiguous = kInterleavedK / kElementsPerAccess; static int const kWarpThreadArrangementStrided = kWarpSize / kWarpThreadArrangementContiguous; // // Shared memory layouts // using SmemLayoutA = layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementA>::value, kInterleavedK>; // Shared memory layout using SmemLayoutB = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<ElementB>::value, kInterleavedK>; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kM * kInterleavedK, Shape::kK / kInterleavedK>, kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>; /// Transpose the ThreadMap of iterator A using 
SmemThreadMapA = transform::TransposePitchLinearThreadMap< IteratorThreadMapA, layout::PitchLinearShape<kWarpThreadArrangementContiguous, kWarpThreadArrangementStrided>>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, SmemThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearWarpRakedThreadMap< layout::PitchLinearShape<Shape::kN * kInterleavedK, Shape::kK / kInterleavedK>, kThreads, layout::PitchLinearShape<32, 1>, kElementsPerAccess>; /// Transpose the ThreadMap of iterator A using SmemThreadMapB = transform::TransposePitchLinearThreadMap< IteratorThreadMapB, layout::PitchLinearShape<kWarpThreadArrangementContiguous, kWarpThreadArrangementStrided>>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, SmemThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, WarpCount::kK, AccumulatorsInRowMajor>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for SIMT GEMMs using multistage pipeline. /// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp 
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; // Shared memory layout using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, IteratorThreadMapA>; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator B using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, SmemThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = 4; static const int WarpNumThreadsN = 8; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); static_assert(!((Shape::kK / 32) % LaneN), "Padding must be divisible by Lane"); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<0, 0>, MatrixShape<0, Shape::kK / 32>, WarpCount::kK>; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. 
/// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; // Shared memory layout using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, IteratorThreadMapA>; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = 4; static const int WarpNumThreadsN = 8; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 
2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. /// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::RowMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = 
layout::ColumnMajor; // Shared memory layout using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator A using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, SmemThreadMapA>; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator B using SmemThreadMapB = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapB>; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, SmemThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = 4; static const int WarpNumThreadsN = 8; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN; static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); static_assert(!((Shape::kK / 32) % LaneM) && !((Shape::kK / 32) % LaneN), "Padding must be divisible by Lane"); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<Shape::kK / 32, 0>, MatrixShape<0, Shape::kK / 32>, WarpCount::kK>; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. 
/// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::RowMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kElementsPerAccess = 1; // // Shared memory layouts // using SmemLayoutA = layout::ColumnMajor; // Shared memory layout using SmemLayoutB = layout::RowMajor; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, kElementsPerAccess >; /// Transpose the ThreadMap of iterator A using SmemThreadMapA = transform::TransposePitchLinearThreadMapSimt<IteratorThreadMapA>; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 0, SmemThreadMapA>; /// Policy of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, kElementsPerAccess >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileAccessIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 1, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level op static const int WarpNumThreadsM = 4; static const int WarpNumThreadsN = 8; static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN), "WarpShape must be divisible by ThreadTile shape."); static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM; static const int 
ThreadTileN = WarpShape::kN / WarpNumThreadsN; static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1; static const int numElementsA = 128 / sizeof_bits<ElementA>::value; static const int numElementsB = 128 / sizeof_bits<ElementB>::value; static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM); static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN); static_assert(!((Shape::kK / 32) % LaneM), "Padding must be divisible by Lane"); // these should have max of thread tile also using LaneMmaShape = cutlass::gemm::GemmShape< LaneM, LaneN, 1>; using Policy = cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout LaneMmaShape >; using MmaWarpSimt = cutlass::gemm::warp::MmaSimt< WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<> 128, 128, 8 ElementA, /// Data type of A elements SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout) ElementB, /// Data type of B elements SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout) ElementC, /// Element type of C matrix LayoutC, /// Layout of C matrix (concept: MatrixLayout) Policy /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) >; /// Used for partial specialization /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaWarpSimt, MatrixShape<Shape::kK / 32, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. /// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::AffineRank2ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::AffineRank2RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::RowMajor, ElementC, LayoutC, arch::OpClassSimt, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// 
ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. /// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::AffineRank2RowMajor; using ElementB = ElementB_; using LayoutB = layout::AffineRank2ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::ColumnMajor, ElementC, LayoutC, arch::OpClassSimt, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. 
/// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::AffineRank2ColumnMajor, ElementB_, layout::AffineRank2ColumnMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::AffineRank2ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::AffineRank2ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::ColumnMajor, ElementB, layout::ColumnMajor, ElementC, LayoutC, arch::OpClassSimt, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; /// Partial specialization for SIMT GEMMs using multistage pipeline. 
/// /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Number of stages int Stages, /// Operation performed by Simt typename Operator_, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::AffineRank2RowMajor, ElementB_, layout::AffineRank2RowMajor, ElementC_, LayoutC_, arch::OpClassSimt, Stages, Operator_, false, CacheOpA, CacheOpB> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::AffineRank2RowMajor; using ElementB = ElementB_; using LayoutB = layout::AffineRank2RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; static int const kStages = Stages; static cutlass::arch::CacheOperation::Kind const kCacheOpA = cutlass::arch::CacheOperation::Always; static cutlass::arch::CacheOperation::Kind const kCacheOpB = cutlass::arch::CacheOperation::Always; /// Default Operator using Operator = Operator_; using Base = DefaultMmaCore<Shape, WarpShape, InstructionShape, ElementA, layout::RowMajor, ElementB, layout::RowMajor, ElementC, LayoutC, arch::OpClassSimt, kStages, Operator, false, kCacheOpA, kCacheOpB>; // // Shared memory layouts // using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = typename Base::IteratorThreadMapA; /// Shared memory iterator to A operand using SmemIteratorA = typename Base::SmemIteratorA; /// Policy of iterator B using IteratorThreadMapB = typename Base::IteratorThreadMapB; /// Shared memory iterator to B operand using SmemIteratorB = typename Base::SmemIteratorB; // // Warp-level matrix multiply operator // /// Policy used to define MmaPipelined using MmaPolicy = typename Base::MmaPolicy; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass
include/cutlass/gemm/threadblock/default_mma_core_sm80.h/0
{ "file_path": "include/cutlass/gemm/threadblock/default_mma_core_sm80.h", "repo_id": "include", "token_count": 38063 }
32
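Usage note for the affine-layout DefaultMmaCore specializations in default_mma_core_sm80.h above: each one simply re-targets the canonical RowMajor/ColumnMajor Base core, so the shared-memory layouts, write iterators, and MmaPolicy are inherited unchanged and only the global-memory layout parameters differ. The compile-time sketch below shows one way such a specialization might be instantiated; the tile shapes, float element types, stage count, and cutlass::arch::OpMultiplyAdd operator are illustrative assumptions, with the template arguments listed in the same order as the partial specialization above.

// Minimal compile-time sketch (assumed shapes/types) selecting the
// AffineRank2RowMajor x AffineRank2ColumnMajor SIMT specialization above.
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"

using AffineSimtCore = cutlass::gemm::threadblock::DefaultMmaCore<
    cutlass::gemm::GemmShape<128, 128, 8>,           // threadblock tile (assumed)
    cutlass::gemm::GemmShape<32, 64, 8>,             // warp tile (assumed)
    cutlass::gemm::GemmShape<1, 1, 1>,               // SIMT instruction shape
    float, cutlass::layout::AffineRank2RowMajor,     // operand A
    float, cutlass::layout::AffineRank2ColumnMajor,  // operand B
    float, cutlass::layout::RowMajor,                // accumulator C
    cutlass::arch::OpClassSimt,
    3,                                               // stages (assumed)
    cutlass::arch::OpMultiplyAdd,                    // operator (assumed)
    false,
    cutlass::arch::CacheOperation::Always,
    cutlass::arch::CacheOperation::Always>;

// Everything of interest is forwarded unchanged from the canonical Base core:
static_assert(AffineSimtCore::kStages == 3, "stage count is forwarded unchanged");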
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a multistage threadblock-scoped Blocked-Ell MMA. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/gemm/threadblock/mma_base.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class EllMmaMultistage : public MmaBase<Shape_, Policy_, Stages> { public: ///< Base class using Base = MmaBase<Shape_, Policy_, Stages>; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA_; ///< Iterates over tiles of B operand in global memory using IteratorB = IteratorB_; ///< Data type of accumulator matrix using ElementC = ElementC_; ///< Layout of accumulator matrix using LayoutC = LayoutC_; ///< Policy describing tuning details using Policy = Policy_; using SmemIteratorA = SmemIteratorA_; using SmemIteratorB = SmemIteratorB_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; using EllIterator = typename cutlass::transform::threadblock::ell::Iterator; // // Dependent types // /// Fragment of accumulator tile using FragmentC = typename Policy::Operator::FragmentC; /// Warp-level Mma using Operator = typename Policy::Operator; /// Minimum architecture is Sm80 to support cp.async using ArchTag = arch::Sm80; /// Complex transform on A operand static ComplexTransform const kTransformA = Operator::kTransformA; /// Complex transform on B operand static ComplexTransform const kTransformB = Operator::kTransformB; /// Internal structure exposed for introspection. 
struct Detail { static_assert(Base::kWarpGemmIterations > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); /// Number of cp.async instructions to load one stage of operand A static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB = IteratorB::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load on group of operand A static int const kAccessesPerGroupA = (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB = (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; }; private: using WarpLoadedFragmentA = typename Operator::FragmentA; using WarpLoadedFragmentB = typename Operator::FragmentB; using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB smem_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE EllMmaMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); } template<bool is_A_sparse, bool is_offset_constant> CUTLASS_DEVICE void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B, EllIterator &ell_iter, int group_start_A = 0, int group_start_B = 0) { iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector); this->smem_iterator_A_.set_iteration_index(group_start_A); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; 
CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_A.get(); bool is_valid = iterator_A.valid(); if (!is_A_sparse){ if (is_offset_constant){ auto ell_offset = ell_iter.get_offset_fast(); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += ell_offset * sizeof(IteratorA::Element) / kSrcBytes; } else { int k_offset = iterator_A.get_k(); auto ell_offset = ell_iter.get_offset(k_offset); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += (ell_offset * sizeof(IteratorA::Element)) / kSrcBytes; } } cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, gmem_ptr, is_valid); ++iterator_A; } ++this->smem_iterator_A_; } } iterator_B.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector); this->smem_iterator_B_.set_iteration_index(group_start_B); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_B.get(); bool is_valid = iterator_B.valid(); if (is_A_sparse){ if (is_offset_constant){ auto ell_offset = ell_iter.get_offset_fast(); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += ell_offset * sizeof(IteratorB::Element) / kSrcBytes; } else { int k_offset = iterator_B.get_k(); auto ell_offset = ell_iter.get_offset(k_offset); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += ( ell_offset * sizeof(IteratorB::Element)) / kSrcBytes; } } cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, is_valid); ++iterator_B; } ++this->smem_iterator_B_; } } } /// Perform a threadblock-scoped matrix multiply-accumulate template<bool is_A_sparse, bool is_offset_constant> CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations, ///< destination accumulator tile FragmentC &accum, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB iterator_B, ///< initial value of accumulator FragmentC const &src_accum, EllIterator &ell_iterator ) { // // Prologue // // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) { iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); iterator_A.set_iteration_index(0); this->smem_iterator_A_.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; auto gmem_ptr = iterator_A.get(); bool is_valid = iterator_A.valid(); if (!is_A_sparse){ if (is_offset_constant){ auto ell_offset = ell_iterator.get_offset_fast(); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += ell_offset * sizeof(IteratorA::Element) / kSrcBytes; } else { int k_offset = iterator_A.get_k(); auto 
ell_offset = ell_iterator.get_offset(k_offset); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += (ell_offset * sizeof(IteratorA::Element)) / kSrcBytes; } } cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, gmem_ptr, is_valid); ++iterator_A; } ++this->smem_iterator_A_; } iterator_B.set_iteration_index(0); this->smem_iterator_B_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; auto gmem_ptr = iterator_B.get(); bool is_valid = iterator_B.valid(); if (is_A_sparse){ if (is_offset_constant){ auto ell_offset = ell_iterator.get_offset_fast(); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += ell_offset * sizeof(IteratorB::Element) / kSrcBytes; } else { int k_offset = iterator_B.get_k(); auto ell_offset = ell_iterator.get_offset(k_offset); is_valid = is_valid && (ell_offset >= 0); gmem_ptr += ( ell_offset * sizeof(IteratorB::Element)) / kSrcBytes; } } cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, is_valid); ++iterator_B; } ++this->smem_iterator_B_; } // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_B.add_tile_offset({1, 0}); ++ell_iterator; this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Defines the boundary of a stage of cp.async. cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand accum = src_accum; // Waits until kStages-2 stages have committed. cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA warp_loaded_frag_A[2]; WarpLoadedFragmentB warp_loaded_frag_B[2]; WarpTransformedFragmentA warp_transformed_frag_A[2]; WarpTransformedFragmentB warp_transformed_frag_B[2]; Operator warp_mma; this->warp_tile_iterator_A_.set_kgroup_index(0); this->warp_tile_iterator_B_.set_kgroup_index(0); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); if (is_A_sparse){ iterator_A.ell_add_mask(ell_iterator.get_blocksize()); } else { iterator_B.ell_add_mask(ell_iterator.get_blocksize()); } int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], warp_loaded_frag_A[0], warp_loaded_frag_B[0]); // tf32x3 kernels use staging accumulation. warp_mma uses a temporary // accumulator and this temporary accumulator is added to the final // accumulator once in every mainloop iteration. 
plus<FragmentC> plus_accum; FragmentC tmp_accum; if (platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { tmp_accum.clear(); } // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; if (warp_mma_k > 0) warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], warp_loaded_frag_A[warp_mma_k % 2], warp_loaded_frag_B[warp_mma_k % 2]); if (platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { warp_mma( tmp_accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], tmp_accum ); if (warp_mma_k == 0) { accum = plus_accum(accum, tmp_accum); tmp_accum.clear(); } } else { warp_mma( accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], accum ); } // Issue global->shared copies for the this stage if (warp_mma_k < Base::kWarpGemmIterations - 1) { int group_start_iteration_A, group_start_iteration_B; group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA; group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB; copy_tiles_and_advance<is_A_sparse, is_offset_constant>( iterator_A, iterator_B, ell_iterator, group_start_iteration_A, group_start_iteration_B); } if (warp_mma_k + 2 == Base::kWarpGemmIterations) { int group_start_iteration_A, group_start_iteration_B; group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; copy_tiles_and_advance<is_A_sparse, is_offset_constant>( iterator_A, iterator_B, ell_iterator, group_start_iteration_A, group_start_iteration_B); // Inserts a memory fence between stages of cp.async instructions. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages have committed. 
arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_B.add_tile_offset({1, 0}); ++ell_iterator; this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_B_.add_tile_offset( {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); } // Do any conversions feeding the first stage at the end of the loop so // we can start right away on mma instructions if (warp_mma_k + 1 == Base::kWarpGemmIterations) warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_transformed_frag_B[(warp_mma_k + 1) % 2], warp_loaded_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_B[(warp_mma_k + 1) % 2]); } } if (platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { accum = plus_accum(accum, tmp_accum); } // Commit and drain all pending and predicated cp.async pnz from the GEMM mainloop cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/gemm/threadblock/ell_mma_multistage.h/0
{ "file_path": "include/cutlass/gemm/threadblock/ell_mma_multistage.h", "repo_id": "include", "token_count": 10488 }
33
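Still on ell_mma_multistage.h above: the mainloop keeps the shared-memory circular buffer consistent by rewinding the write-side tile iterators by kStages tiles when smem_write_stage_idx wraps, and the warp-level read iterators by kStages * kPartitionsK * kWarpGemmIterations warp tiles when smem_read_stage_idx wraps. The sketch below only simulates the two stage indices themselves, with an assumed kStages of 4, to make the wrap pattern visible.

// Host-side simulation of the circular-buffer stage indices maintained at the
// end of EllMmaMultistage's mainloop (kStages assumed to be 4 for illustration).
#include <cstdio>

int main() {
  int const kStages = 4;
  int smem_write_stage_idx = kStages - 1;  // writes start kStages-1 stages ahead
  int smem_read_stage_idx  = 0;

  for (int iter = 0; iter < 2 * kStages; ++iter) {
    // Mirrors the device code: wrap to 0 (after rewinding the iterators)
    // exactly when an index reaches the last stage, otherwise increment.
    smem_write_stage_idx = (smem_write_stage_idx == kStages - 1) ? 0 : smem_write_stage_idx + 1;
    smem_read_stage_idx  = (smem_read_stage_idx  == kStages - 1) ? 0 : smem_read_stage_idx + 1;
    std::printf("iteration %d: write stage %d, read stage %d\n",
                iter, smem_write_stage_idx, smem_read_stage_idx);
  }
  return 0;  // the write index stays kStages-1 stages ahead of the read index (mod kStages)
}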
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing warp-level matrix multiply-accumulate operations targeting Tensor Cores. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/platform/platform.h" #include "cutlass/numeric_conversion.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/arch/mma_sm75.h" #include "cutlass/arch/mma_sm80.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma.h" #include "cutlass/gemm/warp/mma_tensor_op_policy.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { //////////////////////////////////////////////////////////////////////////////// // Shuffle registers for layout conversion //////////////////////////////////////////////////////////////////////////////// template < /// Element type for the operand in registers for the mma.sync typename ElementMma_, /// Element type for the operand in shared memory for ldmatrix typename ElementLoad_, /// Number of mma.sync operations performed along rows or columns int NumMmaInstructions, /// Number of elements in warp fragment int NumElementsInWarpFragment, /// Number of elements in mma fragment int NumElementsInMmaFragment, /// Identifies A or B multiplicand Operand Operand_, /// typename Enable = void > struct FragmentShuffler { public: using ElementMma = ElementMma_; using ElementLoad = ElementLoad_; static int const kNumMmaInstructions = NumMmaInstructions; static int const kNumElementsInWarpFragment = NumElementsInWarpFragment; static int const kNumElementsInMmaFragment = NumElementsInMmaFragment; static Operand const kOperand = Operand_; using WarpFragment = Array<ElementLoad, kNumElementsInWarpFragment>; using MmaFragment = Array<ElementLoad, kNumElementsInMmaFragment>; CUTLASS_DEVICE WarpFragment operator()(WarpFragment const &src) { return src; } }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for `mma.sync` on 16b (F16/BF16) and `ldmatrix` on 8b (S8/U8) /// for operand A multiplicand going through upcasting. 
template < /// Element type for the operand in registers for the mma.sync typename ElementMma_, /// Element type for the operand in shared memory for ldmatrix typename ElementLoad_, /// Number of mma.sync operations performed along rows or columns int NumMmaInstructions, /// Number of elements in warp fragment int NumElementsInWarpFragment, /// Number of elements in mma fragment int NumElementsInMmaFragment > struct FragmentShuffler <ElementMma_, ElementLoad_, NumMmaInstructions, NumElementsInWarpFragment, NumElementsInMmaFragment, Operand::kA, typename platform::enable_if<(sizeof_bits<ElementMma_>::value == 16) && (sizeof_bits<ElementLoad_>::value == 8)>::type> { public: using ElementMma = ElementMma_; using ElementLoad = ElementLoad_; static int const kNumMmaInstructions = NumMmaInstructions; static int const kNumElementsInWarpFragment = NumElementsInWarpFragment; static int const kNumElementsInMmaFragment = NumElementsInMmaFragment; static Operand const kOperand = Operand::kA; using WarpFragment = Array<ElementLoad, kNumElementsInWarpFragment>; using MmaFragment = Array<ElementLoad, kNumElementsInMmaFragment>; static uint32_t const kSelectBytesEvenThread = 0x5410; static uint32_t const kSelectBytesOddThread = 0x7632; private: int delta_up_; int delta_down_; int odd_even_lane_id_; uint32_t byte_selector_; public: CUTLASS_DEVICE FragmentShuffler() { int lane_id = cutlass::arch::LaneId(); delta_up_ = (lane_id & 1) + ((lane_id & 2) >> 1); delta_down_ = 2 - delta_up_; odd_even_lane_id_ = static_cast<int>(lane_id & 1); byte_selector_ = odd_even_lane_id_ * kSelectBytesOddThread + (1 - odd_even_lane_id_) * kSelectBytesEvenThread; } CUTLASS_DEVICE WarpFragment operator()(WarpFragment const &src) { WarpFragment result; MmaFragment const* mma_frag_src_ptr = reinterpret_cast<MmaFragment const*>(&src); MmaFragment* mma_frag_dst_ptr = reinterpret_cast<MmaFragment*>(&result); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < kNumMmaInstructions; n++) { uint32_t const* src_ptr = reinterpret_cast<uint32_t const *>(&mma_frag_src_ptr[n]); uint32_t *dst_ptr = reinterpret_cast<uint32_t *>(&mma_frag_dst_ptr[n]); // Shuffle data within the warp, pull from other threads within the warp uint32_t tmp0 = __shfl_up_sync(0xFFFFFFFF, src_ptr[0], delta_up_); uint32_t tmp1 = __shfl_down_sync(0xFFFFFFFF, src_ptr[0], delta_down_); uint32_t tmp2 = __shfl_up_sync(0xFFFFFFFF, src_ptr[1], delta_up_); uint32_t tmp3 = __shfl_down_sync(0xFFFFFFFF, src_ptr[1], delta_down_); // Reorder the data within the 32-bit word (4x8b) required for mma.sync dst_ptr[0] = __byte_perm(tmp0, tmp2, byte_selector_); dst_ptr[1] = __byte_perm(tmp1, tmp3, byte_selector_); } return result; } }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for `mma.sync` on 16b (F16/BF16) and `ldmatrix` on 8b (S8/U8) /// for operand B multiplicand going through upcasting. 
template < /// Element type for the operand in registers for the mma.sync typename ElementMma_, /// Element type for the operand in shared memory for ldmatrix typename ElementLoad_, /// Number of mma.sync operations performed along rows or columns int NumMmaInstructions, /// Number of elements in warp fragment int NumElementsInWarpFragment, /// Number of elements in mma fragment int NumElementsInMmaFragment > struct FragmentShuffler <ElementMma_, ElementLoad_, NumMmaInstructions, NumElementsInWarpFragment, NumElementsInMmaFragment, Operand::kB, typename platform::enable_if<(sizeof_bits<ElementMma_>::value == 16) && (sizeof_bits<ElementLoad_>::value == 8)>::type> { public: using ElementMma = ElementMma_; using ElementLoad = ElementLoad_; static int const kNumMmaInstructions = NumMmaInstructions; static int const kNumElementsInWarpFragment = NumElementsInWarpFragment; static int const kNumElementsInMmaFragment = NumElementsInMmaFragment; static Operand const kOperand = Operand::kB; using WarpFragment = Array<ElementLoad, kNumElementsInWarpFragment>; using MmaFragment = Array<ElementLoad, kNumElementsInMmaFragment>; static uint32_t const kSelectBytesEvenThread = 0x5410; static uint32_t const kSelectBytesOddThread = 0x7632; private: int delta_up_; int delta_down_; int odd_even_lane_id_; uint32_t byte_selector_; public: CUTLASS_DEVICE FragmentShuffler() { int lane_id = cutlass::arch::LaneId(); delta_up_ = (lane_id & 1) + ((lane_id & 2) >> 1); delta_down_ = 2 - delta_up_; odd_even_lane_id_ = static_cast<int>(lane_id & 1); byte_selector_ = odd_even_lane_id_ * kSelectBytesOddThread + (1 - odd_even_lane_id_) * kSelectBytesEvenThread; } CUTLASS_DEVICE WarpFragment operator()(WarpFragment const &src) { WarpFragment result; MmaFragment const* mma_frag_src_ptr = reinterpret_cast<MmaFragment const *>(&src); MmaFragment* mma_frag_dst_ptr = reinterpret_cast<MmaFragment *>(&result); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < kNumMmaInstructions; n++) { uint32_t const* src_ptr = reinterpret_cast<uint32_t const*>(&mma_frag_src_ptr[n]); uint32_t* dst_ptr = reinterpret_cast<uint32_t*>(&mma_frag_dst_ptr[n]); // Shuffle data within the warp, pull from other threads within the warp uint32_t tmp0 = __shfl_up_sync(0xFFFFFFFF, src_ptr[0], delta_up_); uint32_t tmp1 = __shfl_down_sync(0xFFFFFFFF, src_ptr[0], delta_down_); // Reorder the data within the 32-bit word (4x8b) required for mma.sync dst_ptr[0] = __byte_perm(tmp0, tmp1, byte_selector_); } return result; } }; //////////////////////////////////////////////////////////////////////////////// // Data type conversion //////////////////////////////////////////////////////////////////////////////// template < /// Destination type typename ElementDst_, /// Source type typename ElementSrc_, /// Number of elements int N, /// typename Enable = void> struct FragmentConverter { using ElementDst = ElementDst_; using ElementSrc = ElementSrc_; // Operand fragment registers in destination and source types using DestinationFragment = Array<ElementDst, N>; using SourceFragment = Array<ElementSrc, N>; FastNumericArrayConverter<ElementDst, ElementSrc, N> convert; CUTLASS_DEVICE DestinationFragment operator()(SourceFragment const &src) const { return convert(src); } }; //////////////////////////////////////////////////////////////////////////////// // Partial specialization for when Destination type is the *same* as // Source type template < /// Data type typename Element, /// Number of elements int N, /// typename Enable> struct FragmentConverter<Element, Element, N, Enable> { 
using DestinationFragment = Array<Element, N>; using SourceFragment = Array<Element, N>; CUTLASS_DEVICE DestinationFragment operator()(SourceFragment const &src) const { return src; } }; } // namespace detail /// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) typename Policy_, /// Number of partitions along K dimension int PartitionsK_ = 1, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor = false, /// Used for partial specialization typename Enable = bool > class MmaMixedInputTensorOp { public: /// Shape of warp-level matrix operation (concept: GemmShape) using Shape = Shape_; /// Data type of multiplicand A using ElementA = ElementA_; /// Layout of multiplicand A using LayoutA = LayoutA_; /// Data type of multiplicand B using ElementB = ElementB_; /// Layout of multiplicand B using LayoutB = LayoutB_; /// Data type of accumulator matrix C using ElementC = ElementC_; /// Layout of accumulator matrix C using LayoutC = LayoutC_; /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) using Policy = Policy_; /// Underlying matrix multiply operator (concept: arch::Mma) using ArchMmaOperator = typename Policy::Operator; /// Underlying arch::Mma instruction datatype for A operand using ElementAMma = typename ArchMmaOperator::ElementA; /// Underlying arch::Mma instruction datatype for B operand using ElementBMma = typename ArchMmaOperator::ElementB; /// Underlying arch::Mma instruction datatype for C operand using MmaElementC = typename ArchMmaOperator::ElementC; /// Indicates math operator using MathOperator = typename ArchMmaOperator::Operator; /// Architecture tag from underlying instruction using ArchTag = typename ArchMmaOperator::ArchTag; /// Indicates class of matrix operator using OperatorClass = arch::OpClassTensorOp; /// Shape of underlying instruction using InstructionShape = typename ArchMmaOperator::Shape; /// Complex transform on A operand static ComplexTransform const kTransformA = ComplexTransform::kNone; /// Complex transform on B operand static ComplexTransform const kTransformB = ComplexTransform::kNone; /// Number of threads participating in warp-level matrix product static int const kThreadCount = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; /// // static int const kLoadShapeK = InstructionShape::kK * // (sizeof_bits<ElementAMma>::value / sizeof_bits<ElementB>::value); public: /// Iterates over the A operand in Shared Memory using IteratorA = MmaTensorOpMultiplicandTileIterator< MatrixShape<Shape::kM, Shape::kK>, Operand::kA, ElementA, LayoutA, MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>, Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; /// Storage for A tile in registers (loaded from Shared Memory) using FragmentA = typename IteratorA::Fragment; /// Storage for transformed A tile in registers (for use in Mma instruction) using TransformedFragmentA = 
Array<ElementAMma, FragmentA::kElements>; /// Underlying arch::Mma instruction operand fragement for matrix A using MmaOperandA = typename ArchMmaOperator::FragmentA; /// Iterates over the B operand in Shared Memory using IteratorB = MmaTensorOpMultiplicandTileIterator< MatrixShape<Shape::kK, Shape::kN>, Operand::kB, ElementB, LayoutB, MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>, Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; /// Storage for B tile in registers (loaded from Shared Memory) using FragmentB = typename IteratorB::Fragment; /// Storage for transformed B tile in registers (for use in Mma instruction) using TransformedFragmentB = Array<ElementBMma, FragmentB::kElements>; /// Underlying arch::Mma instruction operand fragement for matrix B using MmaOperandB = typename ArchMmaOperator::FragmentB; /// Iterates over the C operand in memory using IteratorC = MmaTensorOpAccumulatorTileIterator< MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC, typename ArchMmaOperator::Shape, typename Policy::OpDelta>; /// Storage for C tile using FragmentC = typename IteratorC::Fragment; /// Underlying arch::Mma instruction operand fragement for matrix C using MmaOperandC = typename ArchMmaOperator::FragmentC; /// Number of mma operations performed using MmaIterations = MatrixShape< (Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM, (Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN >; public: /// Underlying matrix multiply operator (concept: arch::Mma) ArchMmaOperator mma; public: // // Methods // /// Ctor CUTLASS_DEVICE MmaMixedInputTensorOp() {} /// Performs a warp-level matrix multiply-accumulate operation CUTLASS_DEVICE void operator()( FragmentC &D, TransformedFragmentA const &A, TransformedFragmentB const &B, FragmentC const &C ) const { D = C; MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A); MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B); MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D); CUTLASS_PRAGMA_UNROLL for (int m = 0; m < MmaIterations::kRow; ++m) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < MmaIterations::kColumn; ++n) { int n_serpentine = ((m % 2) ? 
(MmaIterations::kColumn - 1 - n) : n); if (AccumulatorsInRowMajor) { // matrix B is reordered mma( ptr_D[n_serpentine + m * MmaIterations::kColumn], ptr_A[m], ptr_B[n_serpentine], ptr_D[n_serpentine + m * MmaIterations::kColumn]); } else { mma(ptr_D[m + n_serpentine * MmaIterations::kRow], ptr_A[m], ptr_B[n_serpentine], ptr_D[m + n_serpentine * MmaIterations::kRow]); } } } } /// Transform the operand warp fragment register to the required data types and layout /// for the `cultass::arch::Mma` CUTLASS_DEVICE void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, FragmentA const &A, FragmentB const &B) const { // Shuffle data within warp to obtain the mma.sync operand layout detail::FragmentShuffler<ElementBMma, ElementB, MmaIterations::kColumn, FragmentB::kElements, MmaOperandB::kElements, Operand::kB> shuffler_B; FragmentB tmp_B; tmp_B = shuffler_B(B); // Convert the B operand to the Mma Instruction operand type detail::FragmentConverter<ElementBMma, ElementB, FragmentB::kElements> convert_B; dst_B = convert_B(tmp_B); FragmentA tmp_A; Array<ElementA, FragmentA::kElements / 2> * ptr_tmp_A = reinterpret_cast<Array<ElementA, FragmentA::kElements / 2> *>(&tmp_A); Array<ElementAMma, FragmentA::kElements / 2> * ptr_dst_A = reinterpret_cast<Array<ElementAMma, FragmentA::kElements / 2> *>(&dst_A); // Shuffle data within warp to obtain the mma.sync operand layout detail::FragmentShuffler<ElementAMma, ElementA, MmaIterations::kRow, FragmentA::kElements, MmaOperandA::kElements, Operand::kA> shuffler_A; // Convert the A operand to the Mma Instruction operand type detail::FragmentConverter<ElementAMma, ElementA, FragmentA::kElements / 2> convert_A; tmp_A = shuffler_A(A); ptr_dst_A[0] = convert_A(ptr_tmp_A[0]); ptr_dst_A[1] = convert_A(ptr_tmp_A[1]); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
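A note on the warp-level operator() above: it walks the accumulator tiles in a serpentine order, reversing the column direction on odd rows via n_serpentine. The tiny host-side sketch below, with an assumed 4x4 MmaIterations grid, just prints the (m, n) visitation order that indexing produces.

// Host-side sketch of the serpentine traversal in MmaMixedInputTensorOp::operator()
// (MmaIterations assumed to be a 4x4 grid purely for illustration).
#include <cstdio>

int main() {
  int const kRow = 4, kColumn = 4;
  for (int m = 0; m < kRow; ++m) {
    for (int n = 0; n < kColumn; ++n) {
      // Odd rows visit the columns in reverse, exactly as in the device code:
      int n_serpentine = (m % 2) ? (kColumn - 1 - n) : n;
      std::printf("(%d,%d) ", m, n_serpentine);
    }
    std::printf("\n");
  }
  return 0;
}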
include/cutlass/gemm/warp/mma_mixed_input_tensor_op.h/0
{ "file_path": "include/cutlass/gemm/warp/mma_mixed_input_tensor_op.h", "repo_id": "include", "token_count": 7131 }
34
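One more note on the FragmentShuffler specializations in mma_mixed_input_tensor_op.h above: the warp shuffles pair up neighbouring lanes, and __byte_perm with the selectors 0x5410 (even lanes) and 0x7632 (odd lanes) then gathers the 8-bit operand bytes into the order the 16-bit mma.sync consumes. The host-side emulation below only demonstrates which source bytes those two selectors pick; byte_perm here is a local stand-in for the CUDA intrinsic covering just the selector values 0-7 used in the file.

// Host-side stand-in for CUDA's __byte_perm, limited to selector nibbles 0-7:
// nibble i of the selector picks result byte i from the 8-byte value {b:a},
// where 0-3 index the bytes of a and 4-7 index the bytes of b.
#include <cstdint>
#include <cstdio>

static uint32_t byte_perm(uint32_t a, uint32_t b, uint32_t selector) {
  uint64_t combined = (uint64_t(b) << 32) | a;
  uint32_t result = 0;
  for (int i = 0; i < 4; ++i) {
    uint32_t sel = (selector >> (4 * i)) & 0x7;
    result |= (uint32_t(combined >> (8 * sel)) & 0xff) << (8 * i);
  }
  return result;
}

int main() {
  uint32_t a = 0x33221100u;  // bytes of a: 00 11 22 33 (low to high)
  uint32_t b = 0x77665544u;  // bytes of b: 44 55 66 77 (low to high)
  // kSelectBytesEvenThread: low half of a followed by low half of b.
  std::printf("0x5410 -> 0x%08x\n", byte_perm(a, b, 0x5410));  // 0x55441100
  // kSelectBytesOddThread: high half of a followed by high half of b.
  std::printf("0x7632 -> 0x%08x\n", byte_perm(a, b, 0x7632));  // 0x77663322
  return 0;
}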
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/arch/wmma.h" #if defined(CUTLASS_ARCH_WMMA_ENABLED) #include "cutlass/wmma_array.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_ref.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/platform/platform.h" #include "cutlass/fast_math.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { //////////////////////////////////////////////////////////////////////////////// template < ///< Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Operand identity (A or B) Operand Operand, /// Data type of operand typename Element_, /// Layout of operand typename Layout_, /// Delta between *MMA operations (in units of *WMMA operations, concept:MatrixShape) int OpDelta_, /// Number of threads participating in one matrix operation int Threads, /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) typename Policy_> class MmaTensorOpWmmaMultiplicandTileIterator; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread WMMA operation. /// It uses nvcuda::wmma::load_matrix_sync to load from shared /// memory and therefore must be initialized with a TensorRef to shared memory. 
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept //////////////////////////////////////////////////////////////////////////////// template < ///< Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Layout of operand typename Layout_, /// Interval between adjacent *WMMA instructions (in units of WMMA instructions) int OpDelta_, /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) typename Policy_> class MmaTensorOpWmmaMultiplicandTileIterator< Shape_, Operand::kA, Element_, Layout_, OpDelta_, 32, Policy_> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kA; /// Element type using Element = Element_; /// Layout of source tile using Layout = Layout_; /// Delta between *WMMA operations static int const kOpDelta = OpDelta_; /// Wmma Operator information and operation delta using Policy = Policy_; // // Derived quantities // /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Stride Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Native Wmma shape for operand A (concept MatrixShape) using WmmaShape = MatrixShape< Policy::Operator::Shape::kM, Policy::Operator::Shape::kK >; /// Map cutlass dataype to nvcuda::wmma datatype using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType<Element>::Type; /// Shape of individual WMMA load / stores for operand A using Iterations = MatrixShape< Shape::kRow / WmmaShape::kRow, 1 >; /// Fragment object holding a warps part using Fragment = WmmaFragmentArray<typename Policy::Operator::FragmentA, Iterations::kCount>; ////////////////////////////////////////////////////////////////////////////////////////////////////// /// statically assert this specialization ///////////////////////////////////////////////////////////////////////////////////////////////////// /// This iterator is specalized for Operand A static_assert(kOperand == Operand::kA, "MmaTensorOpWmmaMultiplicandTileIterator may only be instantiated for A operands to warp-level Mma."); /// Supported memory layouts static_assert( platform::is_same<cutlass::layout::RowMajor, Layout>::value || platform::is_same<cutlass::layout::ColumnMajor, Layout>::value, "Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor"); /// Not working on this feature at the moment. 
static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); ///////////////////////////////////////////////////////////////////////////////////////////////////// private: /// Shared memory base pointers - not advanced char const *pointer_; /// Byte offset into shared memory - advanced Index byte_offset_; /// Stride in units of number of elements StrideIndex stride_; /// Layout of shared memory Layout layout_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpWmmaMultiplicandTileIterator() { } /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator( TensorRef const &ref, int lane_id ): pointer_(reinterpret_cast<char const*>(ref.data())), byte_offset_(0), stride_(ref.stride(0)), layout_(ref.stride(0)) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += (offset * sizeof_bits<Element>::value) / 8; return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpWmmaMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { Index elements_offset = layout_({tile_offset.row() * Shape::kRow, tile_offset.column() * WmmaShape::kColumn}); byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8; return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator++() { Index elements_offset = layout_({0, WmmaShape::kColumn}); byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8; return *this; } /// Advances the iterator along the opposite of the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator--() { Index elements_offset = layout_({0, WmmaShape::kColumn}); byte_offset_ -= (elements_offset * sizeof_bits<Element>::value) / 8; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load_with_byte_offset(Fragment &frag, Index byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kColumn; ++k) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kRow; ++m) { Index load_byte_offset = layout_({m * WmmaShape::kRow, k * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8; const WmmaDataType *ptr = reinterpret_cast<const WmmaDataType *>(pointer_ + byte_offset_ + load_byte_offset + byte_offset); nvcuda::wmma::load_matrix_sync(frag[m], ptr, stride_); } } } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_byte_offset(Fragment const &frag, Index byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kColumn; ++k) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kRow; ++m) { Index store_byte_offset = layout_({m * WmmaShape::kRow, k * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8; WmmaDataType *ptr = reinterpret_cast<WmmaDataType *>(pointer_ + byte_offset_ + store_byte_offset + byte_offset); nvcuda::wmma::store_matrix_sync(ptr, frag[m], stride_); } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_byte_offset(frag, 0); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread WMMA operation. /// It uses nvcuda::wmma::load_matrix_sync to load from shared /// memory and therefore must be initialized with a TensorRef to shared memory. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// //////////////////////////////////////////////////////////////////////////////// template < ///< Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Layout of operand typename Layout_, /// Interval between adjacent *WMMA instructions (in units of WMMA instructions) int OpDelta_, /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) typename Policy_> class MmaTensorOpWmmaMultiplicandTileIterator< Shape_, Operand::kB, Element_, Layout_, OpDelta_, 32, Policy_> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kB; /// Element type using Element = Element_; /// Layout of source tile using Layout = Layout_; /// Delta between *WMMA operations static int const kOpDelta = OpDelta_; /// Wmma Operator information and operation delta using Policy = Policy_; // // Derived quantities // /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Stride Index type using StrideIndex = typename TensorRef::Layout::Stride::Index; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Native Wmma shape (concept MatrixShape) using WmmaShape = MatrixShape< Policy::Operator::Shape::kK, Policy::Operator::Shape::kN >; /// Map cutlass dataype to nvcuda::wmma datatype using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType<Element>::Type; /// Shape of individual WMMA load / stores for operand B using Iterations = MatrixShape< 1, Shape::kColumn / WmmaShape::kColumn >; /// Fragment object holding a warps part using Fragment = WmmaFragmentArray<typename Policy::Operator::FragmentB, Iterations::kCount>; 
////////////////////////////////////////////////////////////////////////////////////////////////////// /// statically asserts this specialization ///////////////////////////////////////////////////////////////////////////////////////////////////// /// This iterator is specalized for Operand B static_assert(kOperand == Operand::kB, "MmaTensorOpWmmaMultiplicandTileIterator may only be instantiated for B operands to warp-level Mma."); /// Supported memory layouts static_assert( platform::is_same<cutlass::layout::RowMajor, Layout>::value || platform::is_same<cutlass::layout::ColumnMajor, Layout>::value, "Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor"); /// Not working on this feature at the moment. static_assert(kOpDelta == 1, "Alternative arrangements not supported at present."); ///////////////////////////////////////////////////////////////////////////////////////////////////// private: /// Shared memory base pointers - not advanced char const *pointer_; /// Byte offset into shared memory - advanced Index byte_offset_; /// Stride in units of number of elements StrideIndex stride_; /// Layout of shared memory Layout layout_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpWmmaMultiplicandTileIterator() { } /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator( TensorRef const &ref, int lane_id ): pointer_(reinterpret_cast<char const*>(ref.data())), byte_offset_(0), stride_(ref.stride(0)), layout_(ref.stride(0)) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += (offset * sizeof_bits<Element>::value) / 8; return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpWmmaMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) { Index elements_offset = layout_({tile_offset.row() * WmmaShape::kRow, tile_offset.column() * Shape::kColumn}); byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8; return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator++() { Index elements_offset = layout_({WmmaShape::kRow, 0}); byte_offset_ += (elements_offset * sizeof_bits<Element>::value) / 8; return *this; } /// Advances the iterator along the opposite of the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator--() { Index elements_offset = layout_({WmmaShape::kRow, 0}); byte_offset_ -= (elements_offset * sizeof_bits<Element>::value) / 8; return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpWmmaMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load_with_byte_offset(Fragment &frag, Index byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kRow; ++k) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { Index load_byte_offset = layout_({k * WmmaShape::kRow, n * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8; const WmmaDataType *ptr = reinterpret_cast<const WmmaDataType *>(pointer_ + byte_offset_ + load_byte_offset + byte_offset); nvcuda::wmma::load_matrix_sync(frag[n], ptr, stride_); } } } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_byte_offset(Fragment const &frag, Index byte_offset) const { CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kRow; ++k) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { Index store_byte_offset = layout_({k * WmmaShape::kRow, n * WmmaShape::kColumn}) * sizeof_bits<Element>::value / 8; WmmaDataType *ptr = reinterpret_cast<WmmaDataType *>(pointer_ + byte_offset_ + store_byte_offset + byte_offset); nvcuda::wmma::store_matrix_sync(ptr, frag[n], stride_); } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_byte_offset(frag, 0); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; //////////////////////////////////////////////////////////////////////////////// template < ///< Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Element type typename Element_, /// Layout of operand in memory typename Layout_, /// Interval between adjacent *WMMA instructions (in units of WMMA instructions, concept: MatrixShape) typename OpDelta_, /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) typename Policy_> class MmaTensorOpWmmaAccumulatorTileIterator; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread WMMA operation. /// It uses nvcuda::wmma::store_matrix_sync to load from shared /// memory and therefore must be initialized with a TensorRef to shared memory. 
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept | /// WriteableRandomAccessContiguousTileIteratorConcept /// //////////////////////////////////////////////////////////////////////////////// template < ///< Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Layout of operand in memory typename Layout_, /// Interval between adjacent *WMMA instructions (in units of WMMA instructions) typename OpDelta_, /// Shape of the warp in units of thread (concept: MmaTensorOpPolicy) typename Policy_> class MmaTensorOpWmmaAccumulatorTileIterator { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Element type using Element = Element_; /// Layout of source tile using Layout = Layout_; /// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape) using OpDelta = OpDelta_; /// Number of participating threads static int const kThreads = 32; /// Wmma Operator information and operation delta using Policy = Policy_; // // Derived quantities // /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Native Wmma shape (concept MatrixShape) using WmmaShape = MatrixShape< Policy::Operator::Shape::kM, Policy::Operator::Shape::kN >; /// Map cutlass dataype to nvcuda::wmma datatype using WmmaDataType = typename cutlass::arch::CutlassToWmmaDataType<Element>::Type; /// Map cutlass::layout to nvuda::wmma::layout_t enum static nvcuda::wmma::layout_t const WmmaLayout = cutlass::arch::CutlassToWmmaLayout<Layout>::value; /// Shape of individual WMMA load / stores for accumulator using Iterations = MatrixShape< Shape::kRow / WmmaShape::kRow, Shape::kColumn / WmmaShape::kColumn >; /// Fragment object holding a thread's part of a tile using Fragment = WmmaFragmentArray<typename Policy::Operator::FragmentC, Iterations::kCount>; ////////////////////////////////////////////////////////////////////////////////////////////////////// /// statically asserts this specialization ///////////////////////////////////////////////////////////////////////////////////////////////////// /// Supported layouts static_assert( platform::is_same<cutlass::layout::RowMajor, Layout>::value || platform::is_same<cutlass::layout::ColumnMajor, Layout>::value, "Supported list of memory layouts for WMMA are: RowMajor, ColumnMajor"); private: /// Internal reference cutlass::TensorRef<Element, Layout> ref_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaTensorOpWmmaAccumulatorTileIterator() { } /// Constructor from TensorRef CUTLASS_DEVICE MmaTensorOpWmmaAccumulatorTileIterator( TensorRef const &ref, int lane_id ): ref_(ref) { } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE MmaTensorOpWmmaAccumulatorTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaTensorOpWmmaAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) { ref_.add_coord_offset({tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE 
MmaTensorOpWmmaAccumulatorTileIterator & operator++() { ref_.add_coord_offset({Shape::kRow, 0}); return *this; } /// Advances the iterator along the opposite of the advance dimension CUTLASS_HOST_DEVICE MmaTensorOpWmmaAccumulatorTileIterator & operator--() { ref_.add_coord_offset({-Shape::kRow, 0}); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpWmmaAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of the tensor CUTLASS_DEVICE MmaTensorOpWmmaAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kRow; ++m) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { const WmmaDataType * ptr = reinterpret_cast<const WmmaDataType*> (ref_.data() + ref_.offset({m * WmmaShape::kRow, n * WmmaShape::kColumn}) + pointer_offset); nvcuda::wmma::load_matrix_sync(frag[m * Iterations::kColumn + n], ptr, ref_.stride()[0], WmmaLayout); } } } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kRow; ++m) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { WmmaDataType * ptr = reinterpret_cast<WmmaDataType*> (ref_.data() + ref_.offset({m * WmmaShape::kRow, n * WmmaShape::kColumn}) + pointer_offset); nvcuda::wmma::store_matrix_sync(ptr, frag[m * Iterations::kColumn + n], ref_.stride()[0], WmmaLayout); } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; } // namespace warp } // namespace gemm } // namespace cutlass //////////////////////////////////////////////////////////////////////////////// #endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
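// Editorial sketch, not part of the original header: the tile iterators above are thin wrappers
// around the nvcuda::wmma intrinsics named in their doc comments. The 16x16x16 half/float tile
// shape, the function and parameter names, and the leading dimensions below are illustrative
// assumptions; only the nvcuda::wmma calls themselves mirror what load() and store() ultimately
// issue on the iterators' behalf.
#if defined(CUTLASS_ARCH_WMMA_ENABLED)

#include <mma.h>
#include <cuda_fp16.h>

__device__ void wmma_tile_iterator_sketch(
    half const *smem_A,   // operand A tile in shared memory (row-major)
    half const *smem_B,   // operand B tile in shared memory (column-major)
    float *smem_C,        // accumulator tile in shared memory (row-major)
    int lda, int ldb, int ldc) {

  using namespace nvcuda;

  wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag;
  wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> b_frag;
  wmma::fragment<wmma::accumulator, 16, 16, 16, float>              c_frag;

  wmma::fill_fragment(c_frag, 0.0f);              // analogous to clearing an accumulator Fragment

  wmma::load_matrix_sync(a_frag, smem_A, lda);    // what the A-operand iterator's load() issues
  wmma::load_matrix_sync(b_frag, smem_B, ldb);    // what the B-operand iterator's load() issues

  wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);

  wmma::store_matrix_sync(smem_C, c_frag, ldc,    // what the accumulator iterator's store() issues
                          wmma::mem_row_major);
}

#endif // defined(CUTLASS_ARCH_WMMA_ENABLED)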
// Source file: include/cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines layout functions used by TensorRef and derived classes for pitch-linear memory. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/coord.h" #include "cutlass/pitch_linear_coord.h" namespace cutlass { namespace layout { template <int Contiguous, int Strided> using PitchLinearShape = cutlass::PitchLinearShape < Contiguous, Strided >; using PitchLinearCoord = PitchLinearCoord; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Mapping function for pitch-linear memory class PitchLinear { public: /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, LongIndex>; private: // // Data members // /// Stride data member Stride stride_; public: // // Methods // /// Constructor CUTLASS_HOST_DEVICE PitchLinear(LongIndex ldm = 0): stride_(ldm) { } /// Constructor CUTLASS_HOST_DEVICE PitchLinear(Stride _stride): stride_(_stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static PitchLinear packed(TensorCoord const &extent) { return PitchLinear(extent.contiguous()); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return LongIndex(coord.contiguous()) + LongIndex(coord.strided()) * LongIndex(stride_[0]); } /// Returns the logical coordinate given an offset. 
CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex index) const { return make_Coord( TensorCoord::Index(index % stride_[0]), TensorCoord::Index(index / stride_[0]) ); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE LongIndex stride(int rank) const { return stride_[rank]; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE LongIndex & stride(int rank) { return stride_[rank]; } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return extent.strided() * stride_[0]; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace layout } // namespace cutlass
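// A minimal host-side sketch (editorial addition, not part of the original header) showing how the
// PitchLinear mapping above composes. The extent and coordinate values are arbitrary assumptions;
// the members exercised (packed, operator(), inverse, capacity) are the ones defined in this file,
// which is re-included so the snippet is self-contained if extracted.

#include <cassert>

#include "cutlass/layout/pitch_linear.h"

inline void pitch_linear_layout_sketch() {

  using Layout = cutlass::layout::PitchLinear;

  // A tile of 128 contiguous elements by 8 strided rows, tightly packed: stride == 128.
  cutlass::layout::PitchLinearCoord extent(128, 8);
  Layout layout = Layout::packed(extent);

  // offset = contiguous + strided * stride
  cutlass::layout::PitchLinearCoord coord(5, 3);
  auto offset = layout(coord);                       // 5 + 3 * 128 = 389
  assert(offset == 389);

  // inverse() recovers the logical coordinate from a linear offset.
  auto recovered = layout.inverse(offset);
  assert(recovered.contiguous() == 5 && recovered.strided() == 3);

  // capacity() is the number of elements needed to back the full extent.
  assert(layout.capacity(extent) == 128 * 8);
}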
// Source file: include/cutlass/layout/pitch_linear.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines container classes and iterators for managing a statically sized vector of boolean predicates. */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #include <cuda/std/cstdint> #else #include <assert.h> #include <stdint.h> #endif #include "cutlass/cutlass.h" #include "cutlass/platform/platform.h" namespace cutlass { //////////////////////////////////////////////////////////////////////////////////////////////////// /*!@defgroup predicate_vector_concept Predicate Vector Concept @{ Implementations of \ref predicate_vector_concept contain an ordered set of boolean predicates which may be used as conditionals in other device-side operations. Both random access and iterators offering sequential access are provided. @par Predicate Vector A \ref predicate_vector_concept satisfies the following expressions - <b>at(int idx)</b> - returns the value of the indexed predicate - <b>set(int idx, bool value)</b> - sets the value of the indexed predicate - <b>begin()</b> - returns a \ref predicate_iterator_concept pointing to the first predicate @} */ //////////////////////////////////////////////////////////////////////////////////////////////////// /*!@defgroup predicate_iterator_concept Predicate Iterator Concept @{ Implementations of \ref predicate_iterator_concept enables accessing and traversing elements of a bit vector. 
@par Const Predicate Iterator A const \ref predicate_iterator_concept satisfies the following expressions - <b>++it</b> increments the iterator to the next predicate - <b>*it</b> returns the value of the currently pointed-to predicate @par Mutable Predicate Iterator A \ref predicate_iterator_concept that is non-const <b>also</b> satisfies the following expressions - <b>it.set(bool value)</b> sets the value of the currently pointed-to predicate @} */ //////////////////////////////////////////////////////////////////////////////////////////////////// /*!@defgroup predicate_tile_adapter Predicate Tile Adapter Concept @{ Implementations of \ref predicate_tile_adapter provide a mapping between a the elements of a \ref tile_traits_concept and a \ref predicate_vector_concept. @par Predicate Tile Adapter A \ref predicate_tile_adapter satisfies the following expressions - <b>at(int d, int h, int w, int c)</b> - returns the value of a predicate corresponding to the access (d, h, w, c) within the tile. @} */ //////////////////////////////////////////////////////////////////////////////////////////////////// /// Statically sized array of bits implementing @concept{predicate_vector_concept}. template < /// Number of predicates contained in predicate vector int kPredicates_, /// Number of predicates contained in each byte of internal storage int kPredicatesPerByte_ = 4, /// Location of first predicate within byte of internal storage int kPredicateStart_ = 0> struct PredicateVector { /// Number of bits stored by the PredicateVector static constexpr int kPredicates = kPredicates_; /// Number of bits stored within each byte of the predicate bit vector static constexpr int kPredicatesPerByte = kPredicatesPerByte_; /// First bit within each byte containing predicates static constexpr int kPredicateStart = kPredicateStart_; // Make sure no one tries to put more than 8 bits in a byte :) static_assert(kPredicatesPerByte <= 8, "kPredicatesPerByte must fit within an actual byte"); // Make sure the "offsetted" bits fit in one byte. static_assert(kPredicateStart + kPredicatesPerByte <= 8, "The offsetted predicates must fit within an actual byte."); /// Storage type of individual elements typedef uint32_t Storage; /// Number of bytes needed static constexpr int kBytes = (kPredicates + kPredicatesPerByte - 1) / kPredicatesPerByte; /// Number of storage elements needed static constexpr int kWordCount = (kBytes + int(sizeof(Storage)) - 1) / int(sizeof(Storage)); /// The byte mask corresponding to predicates static constexpr Storage kByteMask = (((1 << kPredicatesPerByte) - 1) << kPredicateStart); private: // // Data members // /// Words of bit vector Storage storageData[kWordCount]; // // Methods // /// Computes the word and bit corresponding to a logical predicate index CUTLASS_HOST_DEVICE void computeStorageOffset(int &word, int &bit, int idx) const { CUTLASS_ASSERT(idx < kPredicates); int byte = (idx / kPredicatesPerByte); int bit_offset = (idx % kPredicatesPerByte); word = byte / sizeof(Storage); int byte_offset = (byte % sizeof(Storage)); bit = byte_offset * 8 + bit_offset + kPredicateStart; } /// Returns word mask. CUTLASS_HOST_DEVICE static constexpr bool computeWordMask() { Storage mask(0); CUTLASS_PRAGMA_UNROLL for (size_t byte = 0; byte < sizeof(Storage); ++byte) { mask |= (kByteMask << (byte * 8)); } return mask; } /// Returns mask of last word. 
CUTLASS_HOST_DEVICE static constexpr bool computeLastWordMask() { Storage mask(0); CUTLASS_PRAGMA_UNROLL for (int byte = 0; byte < kBytes % sizeof(Storage); ++byte) { mask |= (kByteMask << (byte * 8)); } return mask; } /// Accesses a given word with optional assertions CUTLASS_HOST_DEVICE Storage &storage(int word) { CUTLASS_ASSERT(word < kWordCount); return storageData[word]; } /// Accesses a given word with optional assertions CUTLASS_HOST_DEVICE Storage const &storage(int word) const { CUTLASS_ASSERT(word < kWordCount); return storageData[word]; } public: // // Iterator // /** * @brief An iterator implementing \ref predicate_iterator_concept enabling sequential * read and write access to predicates. * @concept{predicate_iterator_concept} */ class Iterator { /// Reference to PredicateVector instance PredicateVector &vec_; /// Index into PredicateVector int bit_; public: /// Copy constructor CUTLASS_HOST_DEVICE Iterator(Iterator const &it) : vec_(it.vec_), bit_(it.bit_) {} /// Constructs an iterator from a PredicateVector CUTLASS_HOST_DEVICE Iterator(PredicateVector &vec, int _start = 0) : vec_(vec), bit_(_start) {} /// Pre-increment CUTLASS_HOST_DEVICE Iterator &operator++() { ++bit_; return *this; } /// Increment CUTLASS_HOST_DEVICE Iterator &operator+=(int offset) { bit_ += offset; return *this; } /// Pre-decrement CUTLASS_HOST_DEVICE Iterator &operator--() { --bit_; return *this; } /// Decrement CUTLASS_HOST_DEVICE Iterator &operator-=(int offset) { bit_ -= offset; return *this; } /// Post-increment CUTLASS_HOST_DEVICE Iterator operator++(int) { Iterator ret(*this); ret.bit_++; return ret; } /// Post-decrement CUTLASS_HOST_DEVICE Iterator operator--(int) { Iterator ret(*this); ret.bit_--; return ret; } /// Iterator advances by some amount CUTLASS_HOST_DEVICE Iterator operator+(int offset) { Iterator ret(*this); ret.bit_ += offset; return ret; } /// Iterator recedes by some amount CUTLASS_HOST_DEVICE Iterator operator-(int offset) { ConstIterator ret(*this); ret.bit_ -= offset; return ret; } /// Returns true if iterators point to the same bit CUTLASS_HOST_DEVICE bool operator==(Iterator const &it) const { return bit_ == it.bit_; } /// Returns false if iterators point to the same bit CUTLASS_HOST_DEVICE bool operator!=(Iterator const &it) const { return bit_ != it.bit_; } /// Gets the bit at the pointed to location CUTLASS_HOST_DEVICE bool get() { return vec_.at(bit_); } /// Gets the bit at the pointed to location CUTLASS_HOST_DEVICE bool at() const { return vec_.at(bit_); } /// Dereferences iterator CUTLASS_HOST_DEVICE bool operator*() const { return at(); } /// Sets the bit at the pointed to location CUTLASS_HOST_DEVICE void set(bool value = true) { vec_.set(bit_, value); } }; /** * @brief An iterator implementing \ref predicate_iterator_concept enabling sequential * read and write access to predicates. 
* @concept{predicate_iterator_concept} */ class ConstIterator { /// Reference to PredicateVector instance PredicateVector const &vec_; /// Index into PredicateVector int bit_; public: /// Copy constructor CUTLASS_HOST_DEVICE ConstIterator(ConstIterator const &it) : vec_(it.vec_), bit_(it.bit_) {} /// Constructs an iterator from a PredicateVector CUTLASS_HOST_DEVICE ConstIterator(PredicateVector const &vec, int _start = 0) : vec_(vec), bit_(_start) {} /// Pre-increment CUTLASS_HOST_DEVICE ConstIterator &operator++() { ++bit_; return *this; } /// Increment CUTLASS_HOST_DEVICE ConstIterator &operator+=(int offset) { bit_ += offset; return *this; } /// Pre-decrement CUTLASS_HOST_DEVICE ConstIterator &operator--() { --bit_; return *this; } /// Decrement CUTLASS_HOST_DEVICE ConstIterator &operator-=(int offset) { bit_ -= offset; return *this; } /// Post-increment CUTLASS_HOST_DEVICE ConstIterator operator++(int) { ConstIterator ret(*this); ret.bit_++; return ret; } /// Post-decrement CUTLASS_HOST_DEVICE ConstIterator operator--(int) { ConstIterator ret(*this); ret.bit_--; return ret; } /// Iterator advances by some amount CUTLASS_HOST_DEVICE ConstIterator operator+(int offset) { ConstIterator ret(*this); ret.bit_ += offset; return ret; } /// Iterator recedes by some amount CUTLASS_HOST_DEVICE ConstIterator operator-(int offset) { ConstIterator ret(*this); ret.bit_ -= offset; return ret; } /// Returns true if iterators point to the same bit CUTLASS_HOST_DEVICE bool operator==(ConstIterator const &it) const { return bit_ == it.bit_; } /// Returns false if iterators point to the same bit CUTLASS_HOST_DEVICE bool operator!=(ConstIterator const &it) const { return bit_ != it.bit_; } /// Gets the bit at the pointed to location CUTLASS_HOST_DEVICE bool get() { return vec_.at(bit_); } /// Gets the bit at the pointed to location CUTLASS_HOST_DEVICE bool at() const { return vec_.at(bit_); } /// Dereferences iterator CUTLASS_HOST_DEVICE bool operator*() const { return at(); } }; /// Iterator that always returns true struct TrivialIterator { /// Constructor CUTLASS_HOST_DEVICE TrivialIterator() {} /// Copy constructor CUTLASS_HOST_DEVICE TrivialIterator(Iterator const &it) {} /// Constructs an iterator from a PredicateVector CUTLASS_HOST_DEVICE TrivialIterator(PredicateVector const &_vec) {} /// Pre-increment CUTLASS_HOST_DEVICE TrivialIterator &operator++() { return *this; } /// Post-increment CUTLASS_HOST_DEVICE TrivialIterator operator++(int) { return *this; } /// Dereferences iterator CUTLASS_HOST_DEVICE bool operator*() const { return true; } }; public: // // Methods // /// Initialize the predicate vector CUTLASS_HOST_DEVICE PredicateVector(bool value = true) { fill(value); } /// Fills all predicates with a given value CUTLASS_HOST_DEVICE void fill(bool value = true) { Storage item = (value ? ~Storage(0) : Storage(0)); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kWordCount; ++i) { storage(i) = item; } } /// Clears all predicates CUTLASS_HOST_DEVICE void clear() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kWordCount; ++i) { storage(i) = 0; } } /// Sets all predicates to true CUTLASS_HOST_DEVICE void enable() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kWordCount; ++i) { storage(i) = ~Storage(0); } } /// Accesses a bit within the predicate vector. CUTLASS_HOST_DEVICE bool operator[](int idx) const { return at(idx); } /// Accesses a bit within the predicate vector. 
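/// Worked example (illustrative values, not taken from this header): with the default
/// kPredicatesPerByte = 4, kPredicateStart = 0, and 32-bit Storage, predicate index 6 falls in
/// byte 1, hence word 0 and bit position 1 * 8 + 2 = 10 of that word.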
CUTLASS_HOST_DEVICE bool at(int idx) const { int bit, word; computeStorageOffset(word, bit, idx); return ((storage(word) >> bit) & 1); } /// Set a bit within the predicate vector. CUTLASS_HOST_DEVICE void set(int idx, bool value = true) { int bit, word; computeStorageOffset(word, bit, idx); Storage disable_mask = (~(Storage(1) << bit)); Storage enable_mask = (Storage(value) << bit); storage(word) = ((storage(word) & disable_mask) | enable_mask); } /// Computes the intersection of two identical predicate vectors. CUTLASS_HOST_DEVICE PredicateVector &operator&=(PredicateVector const &predicates) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kWordCount; ++i) { storage(i) = (storage(i) & predicates.storage(i)); } return *this; } /// Computes the union of two identical predicate vectors. CUTLASS_HOST_DEVICE PredicateVector &operator|=(PredicateVector const &predicates) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kWordCount; ++i) { storage(i) = (storage(i) | predicates.storage(i)); } return *this; } /// Returns true if entire predicate array is zero. CUTLASS_HOST_DEVICE bool is_zero() const { constexpr Storage mask = computeWordMask(); Storage result = 0; CUTLASS_PRAGMA_UNROLL for (int word = 0; word < kWordCount - 1; ++word) { result |= (storage(word) & mask); } constexpr Storage last_word_mask = computeLastWordMask(); result |= (storage(kWordCount - 1) & last_word_mask); return result == 0; } /// Returns an iterator to the start of the bit vector CUTLASS_DEVICE Iterator begin() { return Iterator(*this); } /// Returns an iterator CUTLASS_DEVICE Iterator end() { return Iterator(*this, kPredicates); } /// Returns a ConstIterator CUTLASS_DEVICE ConstIterator const_begin() const { return ConstIterator(*this); } /// Returns a ConstIterator CUTLASS_DEVICE ConstIterator const_end() const { return ConstIterator(*this, kPredicates); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass
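// A brief host-side sketch (editorial addition, not part of the original header) of the
// PredicateVector interface defined above. The predicate count (8) and the even/odd guard pattern
// are illustrative assumptions; fill-on-construction, clear(), set(), operator[], and is_zero()
// are the members defined in this file.

#include <cassert>

#include "cutlass/predicate_vector.h"

inline void predicate_vector_sketch() {

  // Eight guard bits, e.g. one per planned memory access. The constructor fills them with 'true'.
  cutlass::PredicateVector<8> guards;

  guards.clear();                        // all predicates -> false
  assert(guards.is_zero());

  // Enable only the even-indexed accesses.
  for (int i = 0; i < 8; i += 2) {
    guards.set(i, true);
  }

  assert(guards[0] && !guards[1] && guards[6]);
  assert(!guards.is_zero());
}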
// Source file: include/cutlass/predicate_vector.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Provides a mechanism for packing and unpacking elements smaller than one byte */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/integer_subbyte.h" #include "cutlass/fast_math.h" namespace cutlass { namespace detail { // This is an implementation detail of cutlass::SubbyteReference and. // cutlass::HostTensor. For a given logical element type Element, // and its corresponding storage (physical) element type StorageUnit, // it computes quantities that help with managing allocations. // // CUTLASS uses a hidden "ContainerUnitType" or StorageUnit type to support // packed arrays of subbyte types such as int4. Element is the "logical" type // for computations, while CUTLASS uses StorageUnit as the element type // of a packed array of Element. If Element is not a subbyte type, // then the corresponding StorageUnit type is just Element itself. // // The ContainerType is always calculated as an array StorageUnit type (the StorageUnit // is always a byte for subbyte types), // and its number of bits is the lcm of the subbyte type's number of bits and 8. // Below are some examples for different subbyte types. // // * Subbyte Type=int2, ContainerType=StorageUnit[1] (StorageUnit=uint8_t) // * Subbyte Type=int4, ContainerType=StorageUnit[1] (StorageUnit=uint8_t) template<class Element, class StorageUnit> struct StorageContainerCalculator { // kContainerTypeNumBits: The number of bits needed for ContainerType static constexpr int kContainerTypeNumBits = (sizeof_bits<Element>::value < 8) ? 
cutlass::lcm_cxx11(sizeof_bits<Element>::value, sizeof_bits<StorageUnit>::value) : sizeof_bits<Element>::value; static_assert(kContainerTypeNumBits % sizeof_bits<Element>::value == 0, "The bits of ContainerType should be divisible by the element's number of bits"); // kContainerTypeNumLogicalElements: The number of logical Element instance(s) that can be stored per ContainerType instance static constexpr int kContainerTypeNumLogicalElements = kContainerTypeNumBits / sizeof_bits<Element>::value; /// 3. kContainerTypeNumBytes: The number of bytes per ContainerType instance static constexpr int kContainerTypeNumBytes = kContainerTypeNumBits / 8; /// 4. kContainerTypeNumBytes: The number of base StorageUnit in the ContainerType static constexpr int kContainerTypeNumStorageUnit = kContainerTypeNumBits / sizeof_bits<StorageUnit>::value; static_assert(kContainerTypeNumBits != 0, "kContainerTypeNumBits can not be zero"); static_assert(kContainerTypeNumLogicalElements != 0, "kContainerTypeNumLogicalElements can not be zero"); static_assert(kContainerTypeNumBytes != 0, "kContainerTypeNumBytes can not be zero"); }; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// This class provides a mechanism for packing and unpacking elements smaller than one byte. It /// assumes these sub-byte elements are packed in a traditional C++ numeric type. /// /// The intended application is to provide a mechanism to indirectly reference elements in /// memory or Array<> objects whose addresses cannot otherwise be taken since they are smaller /// than one byte. /// /// Supports basic pointer arithmetic: /// /// Example: /// /// int4b_t *ptr = ...; /// /// SubbyteReference<int4b_t> ref = ptr; /// ref += 15; /// /// int4b_t x = ref; // load an int4b_t /// ref = x + 2_s4; // perform arithmetic on int4b_t and then store /// template < typename Element_, /// CUTLASS numeric element type. typename Storage_ = uint8_t, /// Underlying storage type. Must be able to hold an integer /// number of objects of type Element. class = void > class ConstSubbyteReference { public: using Element = Element_; using Storage = Storage_; using StoragePointer = Storage const *; static_assert(sizeof_bits<Element>::value <= sizeof_bits<Storage>::value, "Size of Element must not be greater than Storage."); static_assert(!(sizeof_bits<Storage>::value % sizeof_bits<Element>::value), "Storage must be divisible by Element"); private: ///! Number of elements per storage vector int const kElementsPerVector = sizeof_bits<Storage>::value / sizeof_bits<Element>::value; ///! Bit mask Storage const kMask = ((sizeof_bits<Element>::value < sizeof_bits<Storage>::value) ? (Storage(1) << sizeof_bits<Element>::value) - Storage(1) : ~Storage(0)); private: /// Pointer to array containing element StoragePointer ptr_; /// Offset (in units of elements) from pointer. 
/// /// Invariant: must always be in range [0, kElementsPerVector) int offset_; public: CUTLASS_HOST_DEVICE ConstSubbyteReference(): ptr_(nullptr), offset_(0) { } /// Constructor CUTLASS_HOST_DEVICE ConstSubbyteReference( Element const *ptr, /// pointer to memory int64_t offset /// logical offset in units of Element ): ptr_(reinterpret_cast<StoragePointer>(ptr)), offset_(0) { int64_t offset_in_vectors = offset / kElementsPerVector; int64_t offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = int(offset_in_elements); } /// Constructor CUTLASS_HOST_DEVICE ConstSubbyteReference( Element *ptr = nullptr ): ConstSubbyteReference(ptr, 0) { } /// Gets storage pointer CUTLASS_HOST_DEVICE StoragePointer storage_pointer() const { return ptr_; } /// Gets element offset within storage vector CUTLASS_HOST_DEVICE int element_offset() const { return offset_; } /// Unpacks an element from memory CUTLASS_HOST_DEVICE Element get() const { Storage item = Storage((*ptr_ >> (offset_ * sizeof_bits<Element>::value)) & kMask); return reinterpret_cast<Element const &>(item); } /// Unpacks an element from memory CUTLASS_HOST_DEVICE operator Element() const { return get(); } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator+=(int offset) { offset += offset_; int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = offset_in_elements; return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator+=(long long offset) { offset += offset_; long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ += offset_in_vectors; offset_ = offset_in_elements; return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator-=(int offset) { int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator-=(long long offset) { long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } return *this; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator+(int offset) const { ConstSubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator+(long long offset) const { ConstSubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator-(int offset) const { ConstSubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator-=(long long offset) const { ConstSubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Computes 
the difference in elements between references CUTLASS_HOST_DEVICE ptrdiff_t operator-(ConstSubbyteReference ref) const { return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_); } /// Explicit cast to int CUTLASS_HOST_DEVICE explicit operator int() const { return int(get()); } /// Explicit cast to signed 64-bit integer CUTLASS_HOST_DEVICE explicit operator int64_t() const { return int64_t(get()); } /// Explicit cast to unsigned 64-bit integer CUTLASS_HOST_DEVICE explicit operator uint64_t() const { return uint64_t(get()); } /// Explicit cast to float CUTLASS_HOST_DEVICE explicit operator float() const { return float(get()); } /// Explicit cast to double CUTLASS_HOST_DEVICE explicit operator double() const { return double(get()); } }; template < typename Element_, /// CUTLASS numeric element type. typename Storage_ = /// Underlying storage type. Must be able to hold an integer /// number of objects of type Element. #if defined(__CUDA_ARCH__) /// Default size depends on width of atomicCas() overloads. #if (__CUDA_ARCH__ >= 700) /// uint16_t #else uint32_t #endif #else uint8_t #endif , class = void > class SubbyteReference { public: using Element = Element_; using Storage = Storage_; using StoragePointer = Storage *; static_assert(sizeof_bits<Element>::value <= sizeof_bits<Storage>::value, "Size of Element must not be greater than Storage."); static_assert(!(sizeof_bits<Storage>::value % sizeof_bits<Element>::value), "Storage must be divisible by Element"); private: ///! Number of elements per storage vector int const kElementsPerVector = sizeof_bits<Storage>::value / sizeof_bits<Element>::value; ///! Bit mask Storage const kMask = ((sizeof_bits<Element>::value < sizeof_bits<Storage>::value) ? (Storage(1) << sizeof_bits<Element>::value) - Storage(1) : ~Storage(0)); private: /// Pointer to array containing element StoragePointer ptr_; /// Offset (in units of elements) from pointer. 
/// /// Invariant: must always be in range [0, kElementsPerVector) int offset_; public: CUTLASS_HOST_DEVICE SubbyteReference(): ptr_(nullptr), offset_(0) { } /// Constructor CUTLASS_HOST_DEVICE SubbyteReference( Element *ptr, /// pointer to memory int64_t offset /// logical offset in units of Element ): ptr_(reinterpret_cast<StoragePointer>(ptr)), offset_(0) { int64_t offset_in_vectors = offset / kElementsPerVector; int64_t offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = int(offset_in_elements); } /// Constructor CUTLASS_HOST_DEVICE SubbyteReference( Element *ptr = nullptr ): SubbyteReference(ptr, 0) { } /// Gets storage pointer CUTLASS_HOST_DEVICE StoragePointer storage_pointer() const { return ptr_; } /// Gets storage pointer CUTLASS_HOST_DEVICE Element * operator&() const { return reinterpret_cast<Element *>(ptr_); } /// Gets element offset within storage vector CUTLASS_HOST_DEVICE int element_offset() const { return offset_; } /// Unpacks an element from memory CUTLASS_HOST_DEVICE Element get() const { uint8_t const* byte_ptr = reinterpret_cast<uint8_t const*>(ptr_); // Convert offset in elements to offset in bytes constexpr int elements_per_byte = cutlass::sizeof_bits<uint8_t>::value / cutlass::sizeof_bits<Element>::value; byte_ptr += offset_ / elements_per_byte; // Offset of element within a byte int byte_offset = offset_ % elements_per_byte; uint8_t item = uint8_t((*byte_ptr >> (byte_offset * cutlass::sizeof_bits<Element>::value)) & kMask); return reinterpret_cast<Element const &>(item); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference & set(Element const &x) { Storage item = (reinterpret_cast<Storage const &>(x) & kMask); Storage kUpdateMask = Storage(~(kMask << (offset_ * cutlass::sizeof_bits<Element>::value))); Storage new_bits = Storage(item << (offset_ * cutlass::sizeof_bits<Element>::value)); #if defined(__CUDA_ARCH__) // // Homebrew read-modify-write // Storage original; Storage updated; do { original = (*ptr_); updated = Storage((original & kUpdateMask) | new_bits); original = atomicCAS(ptr_, original, updated); } while (updated != original); #else Storage original = (*ptr_); Storage updated = Storage((original & kUpdateMask) | new_bits); *ptr_ = updated; #endif return *this; } //// /// Unpacks an element from memory CUTLASS_HOST_DEVICE operator Element() const { return get(); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference &operator=(Element const & x) { return set(x); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference &operator=(SubbyteReference const & x) { return set(x.get()); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference &operator=( ConstSubbyteReference<Element, Storage> const &x) { return set(x.get()); } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator+=(int offset) { offset += offset_; int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = offset_in_elements; return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator+=(long long offset) { offset += offset_; long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ += offset_in_vectors; offset_ = offset_in_elements; return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference 
&operator-=(int offset) { int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator-=(long long offset) { long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } return *this; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator+(int offset) const { SubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator+(long long offset) const { SubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator-(int offset) const { SubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator-=(long long offset) const { SubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Computes the difference in elements between references CUTLASS_HOST_DEVICE ptrdiff_t operator-(SubbyteReference ref) const { return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_); } /// Explicit cast to int CUTLASS_HOST_DEVICE explicit operator int() const { return int(get()); } /// Explicit cast to signed 64-bit integer CUTLASS_HOST_DEVICE explicit operator int64_t() const { return int64_t(get()); } /// Explicit cast to unsigned 64-bit integer CUTLASS_HOST_DEVICE explicit operator uint64_t() const { return uint64_t(get()); } /// Explicit cast to float CUTLASS_HOST_DEVICE explicit operator float() const { return float(get()); } /// Explicit cast to double CUTLASS_HOST_DEVICE explicit operator double() const { return double(get()); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> using _war = T; template < typename Element_, /// CUTLASS numeric element type. typename Storage_ /// Underlying basic storage type. > class SubbyteReference<Element_, Storage_, typename platform::enable_if<sizeof_bits<Storage_>::value % sizeof_bits<Element_>::value != 0>::type> { public: using Element = Element_; /// Note: It's possible that StorageUnit is not divisible by Element. /// For example, an Element instance might be stored across 2 StorageUnit instances. /// Thus, CUTLASS needs a storage vector to hold an integer number of Element instances. 
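/// Worked example (hypothetical 6-bit element type, for illustration only): with an 8-bit
/// StorageUnit, lcm(6, 8) = 24 bits, so StorageVec = StorageUnit[3] and kElementsPerVector = 4;
/// the element starting at bit 6 spans storage units 0 and 1, which is why get() and set() below
/// may read and write two adjacent StorageUnit values.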
using StorageUnit = Storage_; private: using StorageContainerCalculator = cutlass::detail::StorageContainerCalculator<Element, StorageUnit>; public: static int const kBitsStoredVec = StorageContainerCalculator::kContainerTypeNumBits; static int const kNumStorageUnitPerStoredVec = StorageContainerCalculator::kContainerTypeNumStorageUnit; using StorageVec = StorageUnit[kNumStorageUnitPerStoredVec]; using StorageVecPointer = StorageVec *; using CudaAtomicType = typename platform::conditional< sizeof_bits<StorageUnit>::value == 16, uint32_t, uint64_t >::type; static_assert(sizeof_bits<Element>::value <= sizeof_bits<StorageVec>::value, "Size of Element must not be greater than StorageVec."); static_assert(!(sizeof_bits<StorageVec>::value % sizeof_bits<Element>::value), "StorageVec must be divisible by Element"); private: ///! Number of elements per storage vector int const kElementsPerVector = sizeof_bits<StorageVec>::value / sizeof_bits<Element>::value; ///! Bit mask for storage unit. StorageUnit const kMask = (StorageUnit(1) << sizeof_bits<Element>::value) - StorageUnit(1); /// Pointer to array containing element _war<StorageVecPointer> ptr_; /// Offset (in units of elements) from pointer. /// /// Invariant: must always be in range [0, kElementsPerVector) int offset_; /// Element may be stored across 2 storage unit. /// Low storage unit index in StorageVec /// High storage unit index in StorageVec int low_storage_unit_idx_; int high_storage_unit_idx_; /// Full Mask to extract the entire element uint64_t full_element_mask_; /// Mask to extract the Element from Low storage unit and High storage unit. StorageUnit low_storage_mask_; StorageUnit high_storage_mask_; /// Start bit index inside the storage unit. int start_bit_idx_; private: CUTLASS_HOST_DEVICE void update_element_status() { int num_bits = offset_ * sizeof_bits<Element>::value; start_bit_idx_ = num_bits % sizeof_bits<StorageUnit>::value; low_storage_unit_idx_ = num_bits / sizeof_bits<StorageUnit>::value; high_storage_unit_idx_ = sizeof_bits<StorageUnit>::value - (start_bit_idx_) < sizeof_bits<Element>::value ? 
low_storage_unit_idx_ + 1 : low_storage_unit_idx_; full_element_mask_ = uint64_t(kMask) << start_bit_idx_; low_storage_mask_ = StorageUnit(full_element_mask_ & ~StorageUnit(0)); high_storage_mask_ = StorageUnit((full_element_mask_ >> sizeof_bits<StorageUnit>::value) & ~StorageUnit(0)); } public: CUTLASS_HOST_DEVICE SubbyteReference(): ptr_(nullptr), offset_(0) { } /// Constructor CUTLASS_HOST_DEVICE SubbyteReference( Element *ptr, /// pointer to memory int64_t offset /// logical offset in units of Element ): ptr_(reinterpret_cast<StorageVecPointer>(ptr)), offset_(0) { int64_t offset_in_vectors = offset / kElementsPerVector; int64_t offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = int(offset_in_elements); update_element_status(); } /// Constructor CUTLASS_HOST_DEVICE SubbyteReference( Element *ptr = nullptr ): SubbyteReference(ptr, 0) { } /// Gets StorageVec pointer CUTLASS_HOST_DEVICE StorageVecPointer storage_pointer() const { return ptr_; } /// Gets StorageVec pointer CUTLASS_HOST_DEVICE Element * operator&() const { return reinterpret_cast<Element *>(ptr_); } /// Gets element offset within StorageVec vector CUTLASS_HOST_DEVICE int element_offset() const { return offset_; } /// Unpacks an element from memory CUTLASS_HOST_DEVICE Element get() const { StorageUnit low_bits = (*ptr_)[low_storage_unit_idx_] & low_storage_mask_; StorageUnit high_bits = low_storage_unit_idx_ != high_storage_unit_idx_ ? (*ptr_)[high_storage_unit_idx_] & high_storage_mask_ : 0; uint64_t full_item = ((uint64_t)high_bits << sizeof_bits<StorageUnit>::value) | low_bits; uint8_t result = uint8_t(full_item >> start_bit_idx_); return reinterpret_cast<Element const &>(result); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference & set(Element const &x) { uint64_t item = static_cast<uint64_t>((reinterpret_cast<uint8_t const &>(x) & kMask)) << start_bit_idx_; StorageUnit low_new_bits = StorageUnit(item & ~StorageUnit(0)); StorageUnit high_new_bits = StorageUnit(item >> sizeof_bits<StorageUnit>::value); StorageUnit const kLowUpdateMask = StorageUnit((~full_element_mask_) & (~StorageUnit(0))); StorageUnit const kHighUpdateMask = StorageUnit(((~full_element_mask_) >> sizeof_bits<StorageUnit>::value) & (~StorageUnit(0))); #if defined(__CUDA_ARCH__) // // Homebrew read-modify-write // if(high_storage_unit_idx_ != low_storage_unit_idx_){ /// Only need update 2 storage unit at once. /// consider misaligned address issue, we need to do atomicCAS twice StorageUnit original_low_bits, original_high_bits, update_low_bits, update_high_bits; do { original_low_bits = ((*ptr_)[low_storage_unit_idx_]); update_low_bits = (original_low_bits & kLowUpdateMask) | low_new_bits; original_low_bits = atomicCAS(&((*ptr_)[low_storage_unit_idx_]), original_low_bits, update_low_bits); } while (update_low_bits != original_low_bits); do { original_high_bits = ((*ptr_)[high_storage_unit_idx_]); update_high_bits = (original_high_bits & kHighUpdateMask) | high_new_bits; original_high_bits = atomicCAS(&((*ptr_)[high_storage_unit_idx_]), original_high_bits, update_high_bits); } while (update_high_bits != original_high_bits); } else { /// Only need update 1 storage unit. 
StorageUnit original, updated; do { original = ((*ptr_)[low_storage_unit_idx_]); updated = (original & kLowUpdateMask) | low_new_bits; original = atomicCAS(&((*ptr_)[low_storage_unit_idx_]), original, updated); } while (updated != original); } #else StorageUnit update_low_bits = ((*ptr_)[low_storage_unit_idx_] & kLowUpdateMask) | low_new_bits; StorageUnit update_high_bits = ((*ptr_)[high_storage_unit_idx_] & kHighUpdateMask) | high_new_bits; (*ptr_)[low_storage_unit_idx_] = update_low_bits; if(low_storage_unit_idx_ != high_storage_unit_idx_) (*ptr_)[high_storage_unit_idx_] = update_high_bits; #endif return *this; } //// /// Unpacks an element from memory CUTLASS_HOST_DEVICE operator Element() const { return get(); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference &operator=(Element const & x) { return set(x); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference &operator=(SubbyteReference const & x) { return set(x.get()); } /// Stores an element to memory CUTLASS_HOST_DEVICE SubbyteReference &operator=( ConstSubbyteReference<Element, StorageVec> const &x) { return set(x.get()); } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator+=(int offset) { offset += offset_; int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = offset_in_elements; update_element_status(); return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator+=(long long offset) { offset += offset_; long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ += offset_in_vectors; offset_ = offset_in_elements; update_element_status(); return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator-=(int offset) { int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } update_element_status(); return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE SubbyteReference &operator-=(long long offset) { long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } update_element_status(); return *this; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator+(int offset) const { SubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator+(long long offset) const { SubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator-(int offset) const { SubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE SubbyteReference operator-=(long long offset) const { SubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Computes the difference in elements 
between references CUTLASS_HOST_DEVICE ptrdiff_t operator-(SubbyteReference ref) const { return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_); } /// Explicit cast to int CUTLASS_HOST_DEVICE explicit operator int() const { return int(get()); } /// Explicit cast to signed 64-bit integer CUTLASS_HOST_DEVICE explicit operator int64_t() const { return int64_t(get()); } /// Explicit cast to unsigned 64-bit integer CUTLASS_HOST_DEVICE explicit operator uint64_t() const { return uint64_t(get()); } /// Explicit cast to float CUTLASS_HOST_DEVICE explicit operator float() const { return float(get()); } /// Explicit cast to double CUTLASS_HOST_DEVICE explicit operator double() const { return double(get()); } }; template<typename T> using _war = T; template < typename Element_, /// CUTLASS numeric element type. typename Storage_ /// Underlying storage type. Must be able to hold an integer > class ConstSubbyteReference<Element_, Storage_, typename platform::enable_if<sizeof_bits<Storage_>::value % sizeof_bits<Element_>::value != 0>::type> { public: using Element = Element_; ///! Note: Storage unit could not be divisibale by Element, /// Type element may be stored across 2 storage units, so need a storage vector to hold integer /// number of objects of type Element. using StorageUnit = Storage_; static int const kBitsStoredVec = cutlass::lcm_cxx11(sizeof_bits<Element>::value, sizeof_bits<StorageUnit>::value); static int const kNumStorageUnitPerStoredVec = kBitsStoredVec / sizeof_bits<StorageUnit>::value; using StorageVec = StorageUnit[kNumStorageUnitPerStoredVec]; using StorageVecPointer = StorageVec const *; using CudaAtomicType = typename platform::conditional< sizeof_bits<StorageUnit>::value == 16, uint32_t, uint64_t >::type; static_assert(sizeof_bits<Element>::value <= sizeof_bits<StorageVec>::value, "Size of Element must not be greater than StorageVec."); static_assert(!(sizeof_bits<StorageVec>::value % sizeof_bits<Element>::value), "StorageVec must be divisible by Element"); private: ///! Number of elements per storage vector int const kElementsPerVector = sizeof_bits<StorageVec>::value / sizeof_bits<Element>::value; ///! Bit mask for storage unit. StorageUnit const kMask = (StorageUnit(1) << sizeof_bits<Element>::value) - StorageUnit(1); /// Pointer to array containing element _war<StorageVecPointer> ptr_; /// Offset (in units of elements) from pointer. /// /// Invariant: must always be in range [0, kElementsPerVector) int offset_; /// Element may be stored across 2 storage unit. /// Low storage unit index in StorageVec /// High storage unit index in StorageVec int low_storage_unit_idx_; int high_storage_unit_idx_; /// Full Mask to extract the entire element uint64_t full_element_mask_; /// Mask to extract the Element from Low storage unit and High storage unit. StorageUnit low_storage_mask_; StorageUnit high_storage_mask_; /// Start bit index inside the storage unit. int start_bit_idx_; private: CUTLASS_HOST_DEVICE void update_element_status() { int num_bits = offset_ * sizeof_bits<Element>::value; start_bit_idx_ = num_bits % sizeof_bits<StorageUnit>::value; low_storage_unit_idx_ = num_bits / sizeof_bits<StorageUnit>::value; high_storage_unit_idx_ = sizeof_bits<StorageUnit>::value - (start_bit_idx_) < sizeof_bits<Element>::value ? 
low_storage_unit_idx_ + 1 : low_storage_unit_idx_; full_element_mask_ = uint64_t(kMask) << start_bit_idx_; low_storage_mask_ = StorageUnit(full_element_mask_ & ~StorageUnit(0)); high_storage_mask_ = StorageUnit((full_element_mask_ >> sizeof_bits<StorageUnit>::value) & ~StorageUnit(0)); } public: CUTLASS_HOST_DEVICE ConstSubbyteReference(): ptr_(nullptr), offset_(0) { } /// Constructor CUTLASS_HOST_DEVICE ConstSubbyteReference( Element const *ptr, /// pointer to memory int64_t offset /// logical offset in units of Element ): ptr_(reinterpret_cast<StorageVecPointer>(ptr)), offset_(0) { int64_t offset_in_vectors = offset / kElementsPerVector; int64_t offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = int(offset_in_elements); update_element_status(); } /// Constructor CUTLASS_HOST_DEVICE ConstSubbyteReference( Element *ptr = nullptr ): ConstSubbyteReference(ptr, 0) { } /// Gets storage pointer CUTLASS_HOST_DEVICE StorageVecPointer storage_pointer() const { return ptr_; } /// Gets element offset within storage vector CUTLASS_HOST_DEVICE int element_offset() const { return offset_; } /// Unpacks an element from memory CUTLASS_HOST_DEVICE Element get() const { StorageUnit low_bits = (*ptr_)[low_storage_unit_idx_] & low_storage_mask_; StorageUnit high_bits = low_storage_unit_idx_ != high_storage_unit_idx_ ? (*ptr_)[high_storage_unit_idx_] & high_storage_mask_ : 0; uint64_t full_item = ((uint64_t)high_bits << sizeof_bits<StorageUnit>::value) | low_bits; uint8_t result = uint8_t(full_item >> start_bit_idx_); return reinterpret_cast<Element const &>(result); } /// Unpacks an element from memory CUTLASS_HOST_DEVICE operator Element() const { return get(); } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator+=(int offset) { offset += offset_; int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ += offset_in_vectors; offset_ = offset_in_elements; update_element_status(); return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator+=(long long offset) { offset += offset_; long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ += offset_in_vectors; offset_ = offset_in_elements; update_element_status(); return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator-=(int offset) { int offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = offset % kElementsPerVector; ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } update_element_status(); return *this; } /// Adds an offset in units of elements to the reference CUTLASS_HOST_DEVICE ConstSubbyteReference &operator-=(long long offset) { long long offset_in_vectors = offset / kElementsPerVector; int offset_in_elements = int(offset % kElementsPerVector); ptr_ -= offset_in_vectors; offset_ -= offset_in_elements; if (offset_ < 0) { offset_ += kElementsPerVector; --ptr_; } update_element_status(); return *this; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator+(int offset) const { ConstSubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference 
CUTLASS_HOST_DEVICE ConstSubbyteReference operator+(long long offset) const { ConstSubbyteReference ref(ptr_, offset_); ref += offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator-(int offset) const { ConstSubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Returns a reference to an element with a given offset from the current reference CUTLASS_HOST_DEVICE ConstSubbyteReference operator-(long long offset) const { ConstSubbyteReference ref(ptr_, offset_); ref -= offset; return ref; } /// Computes the difference in elements between references CUTLASS_HOST_DEVICE ptrdiff_t operator-(ConstSubbyteReference ref) const { return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_); } /// Explicit cast to int CUTLASS_HOST_DEVICE explicit operator int() const { return int(get()); } /// Explicit cast to signed 64-bit integer CUTLASS_HOST_DEVICE explicit operator int64_t() const { return int64_t(get()); } /// Explicit cast to unsigned 64-bit integer CUTLASS_HOST_DEVICE explicit operator uint64_t() const { return uint64_t(get()); } /// Explicit cast to float CUTLASS_HOST_DEVICE explicit operator float() const { return float(get()); } /// Explicit cast to double CUTLASS_HOST_DEVICE explicit operator double() const { return double(get()); } }; ///////////////////////////////////////////////////////////////////////////////// template <typename Element, bool subbyte = (sizeof_bits<Element>::value < 8)> struct ReferenceFactory; template <typename Element> struct ReferenceFactory<Element, false> { ///! Number of elements per storage vector static int const kElementsPerVector = 1; CUTLASS_HOST_DEVICE static Element &get(Element *ptr, int64_t offset) { return ptr[offset]; } CUTLASS_HOST_DEVICE static Element const &get(Element const *ptr, int64_t offset) { return ptr[offset]; } CUTLASS_HOST_DEVICE static Element *add_pointer_offset(Element *ptr, int64_t offset) { return ptr + offset; } CUTLASS_HOST_DEVICE static Element const *add_pointer_offset(Element const *ptr, int64_t offset) { return ptr + offset; } }; template <typename Element> struct ReferenceFactory<Element, true> { // // Static methods // CUTLASS_HOST_DEVICE static SubbyteReference<Element> get(Element *ptr, int64_t offset) { return SubbyteReference<Element>(ptr, offset); } CUTLASS_HOST_DEVICE static ConstSubbyteReference<Element> get(Element const *ptr, int64_t offset) { return ConstSubbyteReference<Element>(ptr, offset); } /// Helper to add an offset in number of elements, assuming this offset is divisible /// by the vector size. CUTLASS_HOST_DEVICE static Element *add_pointer_offset(Element *ptr, int64_t offset_in_elements) { return ptr + offset_in_elements * sizeof_bits<Element>::value / sizeof(Element) / 8; } /// Helper to add an offset in number of elements, assuming this offset is divisible /// by the vector size. CUTLASS_HOST_DEVICE static Element const *add_pointer_offset(Element const *ptr, int64_t offset_in_elements) { return ptr + offset_in_elements * sizeof_bits<Element>::value / sizeof(Element) / 8; } }; ///////////////////////////////////////////////////////////////////////////////// } // namespace cutlass
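// ---------------------------------------------------------------------------
// Illustrative sketch (an editorial addition, not part of the CUTLASS API):
// the specializations above handle the case in which sizeof_bits<Element> does
// not divide sizeof_bits<StorageUnit>, so a single element may straddle two
// adjacent storage words and must be reassembled from a low and a high
// fragment. The standalone helpers below reproduce that mask-and-shift
// technique for a hypothetical element of width kElementBits (<= 8) packed
// into byte-sized storage at an arbitrary bit offset; every name here is an
// assumption chosen for illustration only.
namespace subbyte_reference_example {

// Read an element of kElementBits bits starting at absolute bit position
// bit_pos from a little-endian byte array; the element may span two bytes.
inline unsigned read_subbyte(unsigned char const *storage, int bit_pos, int kElementBits) {
  int byte_idx  = bit_pos / 8;                       // low storage unit index
  int start_bit = bit_pos % 8;                       // start bit inside the low unit
  unsigned mask = (1u << kElementBits) - 1u;
  unsigned window = unsigned(storage[byte_idx]);     // low fragment
  if (start_bit + kElementBits > 8) {
    window |= unsigned(storage[byte_idx + 1]) << 8;  // high fragment, if any
  }
  return (window >> start_bit) & mask;
}

// Write an element of kElementBits bits at absolute bit position bit_pos,
// updating only the bits that belong to the element.
inline void write_subbyte(unsigned char *storage, int bit_pos, int kElementBits, unsigned value) {
  int byte_idx  = bit_pos / 8;
  int start_bit = bit_pos % 8;
  unsigned mask   = (1u << kElementBits) - 1u;
  unsigned window = (value & mask) << start_bit;     // element aligned to its bit offset
  unsigned keep   = ~(mask << start_bit);            // storage bits to preserve
  storage[byte_idx] = (unsigned char)((storage[byte_idx] & keep) | window);
  if (start_bit + kElementBits > 8) {
    storage[byte_idx + 1] =
      (unsigned char)((storage[byte_idx + 1] & (keep >> 8)) | (window >> 8));
  }
}

} // namespace subbyte_reference_example
// For example, write_subbyte(buf, 6, 4, 0xA) followed by read_subbyte(buf, 6, 4)
// returns 0xA even though bits 6..9 cross the byte boundary.
// ---------------------------------------------------------------------------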
include/cutlass/subbyte_reference.h/0
{ "file_path": "include/cutlass/subbyte_reference.h", "repo_id": "include", "token_count": 13938 }
38
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Ell iterator for Blocked-Ell matrix (ellValue matrix) used with EllMmaMultistage */ #pragma once #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/matrix_shape.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// EllPredicatedTileAccessIterator /// template <typename Shape, typename Element, typename Layout, int AdvanceRank, typename ThreadMap, typename AccessType> class EllPredicatedTileAccessIterator; //////////////////////////////////////////////////////////////////////////////// /// Specialization of EllPredicatedTileAccessIterator for pitch-linear data. 
/// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class EllPredicatedTileAccessIterator<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, AccessType_> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::PitchLinear; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); static int const kPredicatesPerByte = 4; static int const kPredicatesPerWord = 4 * kPredicatesPerByte; static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector; /// Number of 32b words containing predicates static int const kPredicateByteCount = (kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte; static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4; static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u; static_assert(kPredicateWordCount <= 4, "Too many predicates."); /// Predicate vector stores mask to guard accesses using Mask = Array<uint32_t, kPredicateWordCount>; /// Parameters object is precomputed state and is host-constructible class Params { public: friend EllPredicatedTileAccessIterator; private: /// stride of pitch-linear layout (units of Element) LongIndex stride_; /// amount (in byte) to increment pointer to move to next access along /// strided dimension LongIndex inc_strided_; /// amount (in byte) to increment pointer from last access to first access /// of next tile LongIndex inc_next_; /// amount (in byte) to increment pointer from first access of current tile /// to first access of next tile LongIndex inc_advance_; public: // Default ctor CUTLASS_HOST_DEVICE Params(): stride_(0), inc_strided_(0), inc_next_(0), inc_advance_(0) { } /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : stride_(layout.stride(0)) { inc_strided_ = (LongIndex(stride_) * ThreadMap::Delta::kStrided) * sizeof_bits<Element>::value / 8; if (kAdvanceRank) { // advance along strided dimension inc_advance_ = Shape::kStrided * LongIndex(stride_) * sizeof_bits<Element>::value / 8; } else { // advance along contiguous dimension inc_advance_ = Shape::kContiguous * sizeof_bits<Element>::value / 8; } inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kStrided - 1) * ThreadMap::Delta::kStrided * LongIndex(stride_) * sizeof_bits<Element>::value / 8; }; }; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char *; private: // // Data members // /// Parameters object with precomputed internal state Params const &params_; /// Internal pointer to first access of tile BytePointer pointer_; /// Guard predicates uint32_t predicates_[kPredicateWordCount]; /// Size of tensor TensorCoord 
extent_; /// Initial offset for each thread TensorCoord thread_offset_; /// Offset to the first steady-state tile TensorCoord residue_offset_; /// Initial offset to define ELL block TensorCoord ell_offset_; /// Used for out-of-order visitation bool is_residue_tile_; /// Iteration along vectors implied by the thread map int iteration_vector_; /// Iteration in the contiguous dimension int iteration_contiguous_; /// Iteration in the strided dimension int iteration_strided_; public: /// Computes predicates based on internally tracked per-thread offset. CUTLASS_DEVICE void compute_predicates_( /// Extent of the matrix window TensorCoord extent, /// optionally, simplify predicate calculation during 'steady state' phase bool is_steady_state = false) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = 0u; } CUTLASS_PRAGMA_UNROLL for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) { int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector); int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector); int c = access_residual / kAccessesPerVector; int v = access_residual % kAccessesPerVector; TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements, s * ThreadMap::Delta::kStrided); TensorCoord coord = thread_offset_ + iteration_coord; bool guard; if (is_steady_state) { if (kAdvanceRank == 0) { guard = (coord.strided() < extent.strided()); } else { guard = (coord.contiguous() < extent.contiguous()); } } else { guard = (coord.strided() < extent.strided() && coord.contiguous() < extent.contiguous()); } int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s); int word_idx = pred_idx / kPredicatesPerWord; int residual = pred_idx % kPredicatesPerWord; int byte_idx = residual / kPredicatesPerByte; int bit_idx = residual % kPredicatesPerByte; predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx)); } } public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset) : params_(params), pointer_(reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(pointer))), extent_(extent), is_residue_tile_(true) { TensorCoord residue_extent; if (kAdvanceRank) { typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.strided()) % Shape::kStrided; if (!residue_size) { residue_size = Shape::kStrided; } residue_offset_ = make_Coord(0, residue_size); residue_extent = make_Coord( extent_.contiguous(), min(threadblock_offset.strided() + residue_size, extent_.strided()) ); } else { typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.contiguous()) % Shape::kContiguous; if (!residue_size) { residue_size = Shape::kContiguous; } residue_offset_ = make_Coord(residue_size, 0); residue_extent = make_Coord( min(extent_.contiguous(), threadblock_offset.contiguous() + residue_size), extent_.strided() ); } // Per-thread offset in logical coordinates of tensor ell_offset_ = ThreadMap::initial_offset(thread_id); thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id); // update 
internal pointers Layout layout(params_.stride_); add_pointer_offset(layout(thread_offset_)); compute_predicates_(residue_extent, false); set_iteration_index(0); } /// Construct a EllPredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id) : EllPredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iteration_vector_ = index % kAccessesPerVector; int residual_access = index / kAccessesPerVector; iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += sizeof_bits<Element>::value * pointer_offset / 8; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_DEVICE void add_tile_offset( TensorCoord const &tile_offset) { if (is_residue_tile_) { thread_offset_ += residue_offset_; Layout layout(params_.stride_); add_pointer_offset(layout(residue_offset_)); compute_predicates_(extent_, true); if (kAdvanceRank) { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided() - 1); pointer_ += Shape::kContiguous * tile_offset.contiguous(); } else { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous() - 1); pointer_ += Shape::kStrided * tile_offset.strided(); } } else { if (kAdvanceRank) { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided()); pointer_ += Shape::kContiguous * tile_offset.contiguous(); } else { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous()); pointer_ += Shape::kStrided * tile_offset.strided(); } } is_residue_tile_ = false; } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>( pointer_ + iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) / 8) + iteration_vector_; } /// Returns a k_location CUTLASS_HOST_DEVICE int get_k() const { if(kAdvanceRank){ //strided return ell_offset_.strided() + iteration_strided_ * ThreadMap::Delta::kStrided; }else{ return ell_offset_.contiguous() + iteration_contiguous_ * ThreadMap::Delta::kContiguous + iteration_vector_ * AccessType::kElements; } } CUTLASS_HOST_DEVICE int get_stride() const { if(kAdvanceRank) return params_.stride_; else return 1; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator &operator++() { ++iteration_vector_; if (iteration_vector_ < kAccessesPerVector) { return *this; } iteration_vector_ = 0; ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } // Enter here only if (iteration_contiguous_ == // ThreadMap::Iteration::kContiguous) iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { pointer_ += params_.inc_strided_; return *this; } // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) // which means we enter the next tile. 
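// Note: inc_next_ (see Params) equals inc_advance_ minus the
// (Iterations::kStrided - 1) strided increments accumulated while stepping
// through the strided iterations, so adding it below lands on the first access
// of the next tile; the subsequent inc_advance_ subtraction then restores the
// pointer to the first access of the current tile.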
iteration_strided_ = 0; // advance to next tile pointer_ += params_.inc_next_; // now return to start tile - if the iterator is subsequently advanced, this // subtraction as well as the subsequent integer addition are both elided by // the compiler. pointer_ -= params_.inc_advance_; return *this; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator operator++(int) { EllPredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = enable ? 0u : predicates_[i]; } } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = 0xffffffff; } } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = mask[i]; } } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { mask[i] = predicates_[i]; } } /// add mask for small tiles in ELL CUTLASS_DEVICE void ell_add_mask(int blocksize) { Mask mask; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { mask[i] = 0u; } CUTLASS_PRAGMA_UNROLL for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) { int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector); int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector); int c = access_residual / kAccessesPerVector; int v = access_residual % kAccessesPerVector; TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements, s * ThreadMap::Delta::kStrided); TensorCoord coord = ell_offset_ + iteration_coord; bool guard; if (kAdvanceRank == 0) { guard = (coord.strided() < blocksize); } else { guard = (coord.contiguous() < blocksize); } int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s); int word_idx = pred_idx / kPredicatesPerWord; int residual = pred_idx % kPredicatesPerWord; int byte_idx = residual / kPredicatesPerByte; int bit_idx = residual % kPredicatesPerByte; mask[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx)); } CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { mask[i] &= predicates_[i]; } set_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { int pred_idx = iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous); int word_idx = pred_idx / kPredicatesPerWord; int residual = pred_idx % kPredicatesPerWord; int byte_idx = residual / kPredicatesPerByte; int bit_idx = residual % kPredicatesPerByte; bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0; return pred; } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of EllPredicatedTileAccessIterator for pitch-linear data. 
/// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class EllPredicatedTileAccessIterator<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, AccessType_> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = EllPredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessType>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Parameters object is precomputed state and is host-constructible class Params { private: friend EllPredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default ctor CUTLASS_HOST_DEVICE Params() { } /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))){}; }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.row(), extent.column()), thread_id, layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())) {} /// Construct a EllPredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : EllPredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { 
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } CUTLASS_HOST_DEVICE int get_k() const { return iterator_.get_k(); } CUTLASS_HOST_DEVICE int get_stride() const { return iterator_.get_stride(); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator operator++(int) { EllPredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// add mask for small tiles in ELL CUTLASS_DEVICE void ell_add_mask(int blocksize) { iterator_.ell_add_mask(blocksize); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of EllPredicatedTileAccessIterator for pitch-linear data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class EllPredicatedTileAccessIterator<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, AccessType_> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = EllPredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend EllPredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default ctor CUTLASS_HOST_DEVICE Params() { } /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))){}; }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.column(), extent.row()), thread_id, layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())) {} /// Construct a EllPredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : EllPredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } CUTLASS_HOST_DEVICE int get_k() const { return iterator_.get_k(); } CUTLASS_HOST_DEVICE int get_stride() const { return iterator_.get_stride(); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator operator++(int) { EllPredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// add mask for small tiles in ELL CUTLASS_DEVICE void ell_add_mask(int blocksize) { iterator_.ell_add_mask(blocksize); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of EllPredicatedTileAccessIterator for column-major interleaved data. /// It is mapped to the congruous layout. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, int InterleavedK> class EllPredicatedTileAccessIterator<Shape_, Element_, layout::ColumnMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap_, AccessType_> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; static int const kInterleavedK = InterleavedK; using Layout = layout::ColumnMajorInterleaved<kInterleavedK>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = EllPredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kRow * kInterleavedK, Shape::kColumn / kInterleavedK>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend EllPredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.row() * kInterleavedK, extent.column() / kInterleavedK), thread_id, layout::PitchLinearCoord( threadblock_offset.row() * kInterleavedK, threadblock_offset.column() / kInterleavedK)) {} /// Construct a EllPredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : EllPredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } CUTLASS_HOST_DEVICE int get_k() const { return iterator_.get_k(); } CUTLASS_HOST_DEVICE int get_stride() const { return iterator_.get_stride(); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator operator++(int) { EllPredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// add mask for small tiles in ELL CUTLASS_DEVICE void ell_add_mask(int blocksize) { iterator_.ell_add_mask(blocksize); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of EllPredicatedTileAccessIterator for row-major interleaved data. /// It is mapped to the congruous layout. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, int InterleavedK> class EllPredicatedTileAccessIterator<Shape_, Element_, layout::RowMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap_, AccessType_> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; static int const kInterleavedK = InterleavedK; using Layout = layout::RowMajorInterleaved<kInterleavedK>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = EllPredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kColumn * kInterleavedK, Shape::kRow / kInterleavedK>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend EllPredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: CUTLASS_HOST_DEVICE Params() {} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.column() * kInterleavedK, extent.row() / kInterleavedK), thread_id, layout::PitchLinearCoord( threadblock_offset.column() * kInterleavedK, threadblock_offset.row() / kInterleavedK)) {} /// Construct a EllPredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : EllPredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } CUTLASS_HOST_DEVICE int get_k() const { return iterator_.get_k(); } CUTLASS_HOST_DEVICE int get_stride() const { return iterator_.get_stride(); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE EllPredicatedTileAccessIterator operator++(int) { EllPredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// add mask for small tiles in ELL CUTLASS_DEVICE void ell_add_mask(int blocksize) { iterator_.ell_add_mask(blocksize); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
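// ---------------------------------------------------------------------------
// Illustrative sketch (an editorial addition, not part of the CUTLASS API):
// the iterators above pack guard predicates four per byte and sixteen per
// 32-bit word; a flat predicate index is split into (word, byte, bit) and the
// guard occupies bit (byte * 8 + bit) of the selected word. The standalone
// helpers below mirror that indexing for a caller-provided mask array; the
// names are assumptions chosen for illustration only.
namespace ell_predicate_example {

static int const kPredicatesPerByte = 4;
static int const kPredicatesPerWord = 4 * kPredicatesPerByte;  // 16 predicates per 32-bit word

// Set or clear predicate pred_idx in a packed word array.
inline void set_predicate(unsigned *words, int pred_idx, bool guard) {
  int word_idx = pred_idx / kPredicatesPerWord;
  int residual = pred_idx % kPredicatesPerWord;
  int byte_idx = residual / kPredicatesPerByte;
  int bit_idx  = residual % kPredicatesPerByte;
  unsigned bit = 1u << (byte_idx * 8 + bit_idx);
  words[word_idx] = guard ? (words[word_idx] | bit) : (words[word_idx] & ~bit);
}

// Test predicate pred_idx; this mirrors the indexing used by
// EllPredicatedTileAccessIterator::valid().
inline bool test_predicate(unsigned const *words, int pred_idx) {
  int word_idx = pred_idx / kPredicatesPerWord;
  int residual = pred_idx % kPredicatesPerWord;
  int byte_idx = residual / kPredicatesPerByte;
  int bit_idx  = residual % kPredicatesPerByte;
  return (words[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;
}

} // namespace ell_predicate_example
// ---------------------------------------------------------------------------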
include/cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h/0
{ "file_path": "include/cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h", "repo_id": "include", "token_count": 15640 }
39
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing computing the addresses of storing of tiles from pitch-linear rank=2 tensors. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/matrix_coord.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for congruous arrangements for TensorOps /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileAccessIterator< Shape_, Element_, layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; static int const kCrosswise = Crosswise; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using StrideIndex = typename Layout::Stride::Index; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Internal details made public to facilitate introspection struct Detail { /// This iterator is specialized for an access size that is 128 bits in /// length. static int const kAccessSizeInBits = 128; static_assert(sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits, "This iterator requires a policy whose access size is 128bs"); ///< Number of pointers static int const kPointerCount = (ThreadMap::Iterations::kStrided > 1 ? 
2 : 1); }; /// Element type per access using AccessType = Array<Element, Layout::kElementsPerAccess>; private: // // Data members // /// Stride value StrideIndex stride_; /// Internal pointer to first access of tile AccessType *pointer_[Detail::kPointerCount]; /// Internal byte offset Index byte_offset_; /// Iteration in the contiguous dimension int iteration_contiguous_; /// Iteration in the strided dimension int iteration_strided_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess), byte_offset_(0) { layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Detail::kPointerCount; ++i) { // This is the offset of a thread within a threadblock tile for a specific // pointer (units of elements) layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base + layout::PitchLinearCoord{ 0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i}; // initialize pointer pointer_[i] = reinterpret_cast<AccessType *>( ref.data() + ref.offset(thread_offset_in_threadblock_tile)); } set_iteration_index(0); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; iteration_strided_ = index / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_offset_ += pointer_offset * sizeof(Element); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { AccessType *access_ptr = pointer_[iteration_strided_ & 1]; int stride_idx = (iteration_strided_ & ~1); int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ / Layout::kFactor + iteration_contiguous_ * ThreadMap::Delta::kContiguous / ThreadMap::kElementsPerAccess; char *access_byte_ptr = reinterpret_cast<char *>(access_ptr + access_offset); return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator &operator++() { ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) return *this; // Enter here only if (iteration_contiguous_ == // ThreadMap::Iteration::kContiguous) iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; } // Enter here only if (iteration_strided_ == ThreadMap::Iteration::kStrided) // which means we enter the next tile. iteration_strided_ = 0; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator operator++(int) { RegularTileAccessIterator prev(*this); this->operator++(); return prev; } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { add_pointer_offset(coord.contiguous() * Shape::kContiguous * Layout::kFactor + coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess / Layout::kFactor); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile Iterator specialized for column-major congruous TensorOp formats. 
/// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileAccessIterator< Shape_, Element_, layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for column-major iterator may along advance along the " "columns(rank=0) or rows(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileAccessIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>, (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>; using AccessType = typename UnderlyingIterator::AccessType; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.row(), coord.column()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator operator++(int) { RegularTileAccessIterator prev(*this); ++iterator_; return prev; } }; //////////////////////////////////////////////////////////////////////////////// /// Tile Iterator specialized for row-major congruous TensorOp formats. 
/// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileAccessIterator< Shape_, Element_, layout::RowMajorTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for row-major iterator may along advance along the " "columns(rank=0) or rows(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajorTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileAccessIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>, (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>; using AccessType = typename UnderlyingIterator::AccessType; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.column(), coord.row()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. 
CUTLASS_HOST_DEVICE RegularTileAccessIterator operator++(int) { RegularTileAccessIterator prev(*this); ++iterator_; return prev; } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for crosswise arrangements for TensorOps /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileAccessIterator<Shape_, Element_, layout::TensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; static int const kCrosswise = Crosswise; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using StrideIndex = typename Layout::Stride::Index; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; static_assert(!(ThreadMap::Delta::kContiguous % kCrosswise), "kCrosswise is the smallest unit in the contiguous dimension " "for shared memory swizzling."); /// Internal details made public to facilitate introspection struct Detail { /// This iterator is specialized for an access size that is 128 bits in /// length. static int const kAccessSizeInBits = 128; static_assert(sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits, "This iterator requires a policy whose access size is 128 bits"); /// Number of pointers /// /// Note: TN kblock32 layouts only need 1 pointer, but strangely /// reducing the pointer count hurts performance static int const kPointerCount = (ThreadMap::Iterations::kStrided > 1 ? 2 : 1); }; /// Element type per access using AccessType = Array<Element, Layout::kElementsPerAccess>; private: // // Data members // /// Total number of sections. The memory is divided into stages. One stage /// can store one tile. A stage is divided into sections. Interleaved layouts /// can have multiple sections in a stage. Other layouts have only one section /// in a stage.
int sections_; /// Sections that a stage has int sections_per_stage_; /// Stride value StrideIndex stride_; /// Internal pointer to first access of tile AccessType *pointer_[Detail::kPointerCount]; /// Internal byte offset Index byte_offset_; /// Iteration in the contiguous dimension int iteration_contiguous_; /// Iteration in the strided dimension int iteration_strided_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : sections_(ref.stride(0) / kCrosswise), sections_per_stage_(Shape::kContiguous / kCrosswise), // stride_ = kCrosswise x sections_ x kFactor stride_(ref.stride(0) * Layout::kFactor / Layout::kElementsPerAccess), byte_offset_(0) { layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Detail::kPointerCount; ++i) { // This is the offset of a thread within a threadblock tile for a specific // pointer (units of elements) layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base + layout::PitchLinearCoord{ 0, ThreadMap::Detail::WarpThreadArrangement::kStrided * i}; // initialize pointer pointer_[i] = reinterpret_cast<AccessType *>(ref.data()) + ref.offset(thread_offset_in_threadblock_tile) / Layout::kElementsPerAccess; } set_iteration_index(0); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; iteration_strided_ = index / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_offset_ += pointer_offset * sizeof_bits<Element>::value / 8; } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { AccessType *access_ptr = pointer_[iteration_strided_ & 1]; int stride_idx = (iteration_strided_ & ~1); int access_offset = stride_idx * ThreadMap::Delta::kStrided * stride_ / Layout::kFactor + // kCrosswise elements in the contiguous dimension would span to a // shared memory cache line. iteration_contiguous_ * (ThreadMap::Delta::kContiguous / kCrosswise) * Layout::TileShape::kContiguous; char *access_byte_ptr = reinterpret_cast<char *>(access_ptr + access_offset); return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator &operator++() { ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) return *this; // Enter here only if (iteration_contiguous_ == // ThreadMap::Iteration::kContiguous) iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; } // Enter here only if (iteration_strided_ == ThreadMap::Iteration::kStrided) // which means we enter the next section. iteration_strided_ = 0; return *this; } /// Advances to the next tile in memory. 
CUTLASS_HOST_DEVICE RegularTileAccessIterator operator++(int) { RegularTileAccessIterator prev(*this); this->operator++(); return prev; } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { add_pointer_offset(coord.contiguous() * sections_per_stage_ * stride_ * ThreadMap::kElementsPerAccess / sections_ + coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess / Layout::kFactor); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile Iterator specialized for column-major crosswise TensorOp formats. /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileAccessIterator< Shape_, Element_, layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for column-major iterator may along advance along the " "columns(rank=0) or rows(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileAccessIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, Crosswise>, (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>; using AccessType = typename UnderlyingIterator::AccessType; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.row(), coord.column()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator operator++(int) { RegularTileAccessIterator prev(*this); ++iterator_; return prev; } }; //////////////////////////////////////////////////////////////////////////////// /// Tile Iterator specialized for row-major crosswise TensorOp formats. 
/// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileAccessIterator<Shape_, Element_, layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for row-major iterator may along advance along the " "columns(rank=0) or rows(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileAccessIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, Crosswise>, (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>; using AccessType = typename UnderlyingIterator::AccessType; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.column(), coord.row()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileAccessIterator operator++(int) { RegularTileAccessIterator prev(*this); ++iterator_; return prev; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
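A minimal illustration, written in Python for brevity, of the even/odd pointer selection that the get() methods above share: iteration_strided_ & 1 picks one of the two pre-offset pointers, while iteration_strided_ & ~1 rounds down to the even iteration that owns the strided offset. The constants below are made-up stand-ins for ThreadMap::Delta::kStrided, stride_, and Layout::kFactor, not values taken from any real ThreadMap.

# Hypothetical stand-ins for ThreadMap::Delta::kStrided, stride_, and Layout::kFactor.
DELTA_STRIDED = 8
STRIDE = 64
K_FACTOR = 1

for iteration_strided in range(4):
    pointer_index = iteration_strided & 1   # alternate between the two pointers
    stride_idx = iteration_strided & ~1     # 0, 0, 2, 2, ... each pair shares one offset
    access_offset = stride_idx * DELTA_STRIDED * STRIDE // K_FACTOR
    print(iteration_strided, pointer_index, access_offset)

# Output: 0 0 0 / 1 1 0 / 2 0 1024 / 3 1 1024. Odd iterations reuse the second
# pointer, which was pre-offset by WarpThreadArrangement::kStrided in the
# constructor, so a new strided offset is only needed every other iteration.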
include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h/0
{ "file_path": "include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h", "repo_id": "include", "token_count": 9771 }
40
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Python AST frontend that parses input into DAG IR """ import ast import inspect import textwrap from cutlass_library import DataType import cutlass from cutlass.backend.evt.frontend.frontend_base import EVTFrontendBase from cutlass.backend.epilogue import relu from cutlass.backend.library import FunctionalOp class PythonASTFrontend(EVTFrontendBase, ast.NodeVisitor): def __init__(self, element_compute=DataType.f32, **kwargs): super().__init__(element_compute, **kwargs) # Flags # If this state is True, visit_Constant returns values without creating imm node self.no_imm = False self.visiting_return = False def parse(self, example_inputs): self.example_inputs = example_inputs self.source = textwrap.dedent(inspect.getsource(self.__call__)) self.ast = ast.parse(self.source) self.visit(self.ast) # # Helper functions # @staticmethod def ast_op_to_bindings(op): mapping = { ast.Add: FunctionalOp.Plus, ast.Sub: FunctionalOp.Minus, ast.Mult: FunctionalOp.Multiplies, ast.Div: FunctionalOp.Divides, "relu": relu.binding_type, "multiply_add": FunctionalOp.MultiplyAdd, "sum": (FunctionalOp.Plus, FunctionalOp.AtomicAdd), "max": (FunctionalOp.Maximum, FunctionalOp.AtomicMaximum) } return mapping[op] # # Visiting different node types # def visit_FunctionDef(self, node: ast.FunctionDef): # Visit args and register load nodes for arg in node.args.args: self.visit(arg) for expr in node.body: self.visit(expr) def visit_arg(self, node: ast.arg): # Name of the argument name = node.arg try: example_tensor = self.example_inputs[name] except: raise RuntimeError(f"Example input for {name} is not provided.") self.add_load_node(name, example_tensor) def visit_Name(self, node: ast.Name): return node.id def visit_Constant(self, node: ast.Constant): if 
self.no_imm: return node.value else: name = self.add_imm(node.value) return name def visit_Tuple(self, node: ast.Tuple): results = [] for elt in node.elts: results.append(self.visit(elt)) return tuple(results) def visit_keyword(self, node: ast.keyword): return {node.arg: self.visit(node.value)} def visit_BinOp(self, node: ast.BinOp): if self.visiting_return: raise SyntaxError("Return value cannot be an expression") lhs = self.visit(node.left) rhs = self.visit(node.right) op = self.ast_op_to_bindings(type(node.op)) name = self.add_compute_node(op) # Add edges # The edge weights are used to sort the input args self.add_edge(lhs, name, weight=0) self.add_edge(rhs, name, weight=1) return name def visit_Assign(self, node: ast.BinOp): target = self.visit(node.targets[0]) value = self.visit(node.value) # Create the assign node self.add_store_node(target) # Add edges self.add_edge(value, target) return target def visit_Call(self, node: ast.Call): if self.visiting_return: raise SyntaxError("Return value cannot be an expression") func = self.visit(node.func) args = [self.visit(arg) for arg in node.args] if func in self.layout_fns.keys(): # Parse kwargs # By default, visiting imm automatically creates a load node # However, in function call, keyword args are used to set # specific function attributes such as indices for permute # So no_imm is set to True temporarily self.no_imm = True kwargs = {} for kw in node.keywords: kwargs.update(self.visit(kw)) self.no_imm = False op = self.layout_fns[func] name = self.add_layout_node(op, kwargs) else: op = self.ast_op_to_bindings(func) name = self.add_compute_node(op) # Add edges for idx, arg in enumerate(args): self.add_edge(arg, name, weight=idx) return name def visit_Return(self, node: ast.Return): self.visiting_return = True results = self.visit(node.value) self.visiting_return = False self.return_names = results if not isinstance(results, tuple): results = (results,) for rst in results: try: example_tensor = self.example_inputs[rst] except: raise RuntimeError(f"Example input for {rst} is not provided.") self.set_store_tensor(rst, example_tensor) self.mark_output(rst)
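To make the parsing flow above concrete, here is a sketch of the kind of epilogue function the frontend consumes. Every argument and every returned name must have a matching entry in the example-inputs dictionary so shapes and dtypes can be inferred. The cutlass.epilogue.trace entry point, the shapes, and the dtypes below are illustrative assumptions, not a definitive API reference.

import numpy as np
import cutlass

def example_epilogue(accum, C, alpha, bias):
    # visit_BinOp lowers + and * to Plus/Multiplies compute nodes;
    # visit_Return marks "D" as an output (store) node.
    D = alpha * accum + C + bias
    return D

m, n = 128, 128
example_inputs = {
    "accum": np.zeros((m, n), dtype=np.float32),
    "C": np.zeros((m, n), dtype=np.float32),
    "alpha": 1.5,                                  # scalar argument
    "bias": np.zeros((1, n), dtype=np.float32),    # row-broadcast tensor
    "D": np.zeros((m, n), dtype=np.float32),       # output named in `return`
}

# Assumed user-facing helper that drives PythonASTFrontend.parse() internally.
epilogue_visitor = cutlass.epilogue.trace(example_epilogue, example_inputs)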
python/cutlass/backend/evt/frontend/python_ast.py/0
{ "file_path": "python/cutlass/backend/evt/frontend/python_ast.py", "repo_id": "python", "token_count": 2728 }
41
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Eliminate layout manipulation nodes """ from copy import deepcopy from cutlass.backend.evt.ir import DAGIR, LayoutNode from cutlass.backend.evt.passes.pass_manager import EVTPassBase from cutlass.backend.evt.passes.pass_shape_type_propagation import PassShapeTypePropagation class PassLayoutManipulateElimination(EVTPassBase): """ Eliminate layout manipulation nodes """ dependencies = [PassShapeTypePropagation] def __init__(self, dag_ir: DAGIR) -> None: super().__init__(dag_ir) self.copy_cnt = 0 def call(self): self.layout_nodes_worklist = self.get_all_layout_nodes() # Run while loop utill all layout nodes are eliminated while(len(self.layout_nodes_worklist) > 0): node = self.layout_nodes_worklist.pop(0) # for node in layout_nodes: # Step 1: get the propagation direction direction = self.get_propagation_direction(node) self.visited = [] getattr(self, f"propagate_to_{direction}")(self.dag_ir.get_node_meta(node), node) # Eliminate the current node input_node = self.dag_ir.get_all_inputs(node)[0] self.dag_ir.replace_all_uses_with(node, input_node) # layout_nodes = self.get_all_layout_nodes() def get_all_layout_nodes(self): layout_nodes = [] for node_meta in reversed(self.dag_ir.node_metas_topological_order()): if isinstance(node_meta, LayoutNode): layout_nodes.append(node_meta.name) return layout_nodes def get_propagation_direction(self, node: str): """ The logic is propagating all layout nodes away from the accumulator node. 
""" self.visited = [] self.get_influenced_users(node) nodes_influenced_dir_users = self.visited self.visited = [] self.get_influenced_inputs(node) nodes_influenced_dir_inputs = self.visited if "accum" in nodes_influenced_dir_users and "accum" not in nodes_influenced_dir_inputs: return "inputs" elif "accum" not in nodes_influenced_dir_users and "accum" in nodes_influenced_dir_inputs: return "users" else: raise RuntimeError("Unsolved propagation direction") # Get all influenced nodes if we propagate along the user direction def get_influenced_users(self, node: str): if node in self.visited: return self.visited.append(node) users = self.dag_ir.get_users(node) for user in users: self.get_influenced_users(user) user_inputs = [] for user in users: user_inputs.append(set(self.dag_ir.get_all_inputs(user))) if len(user_inputs) > 0: user_inputs = set.union(*user_inputs) user_inputs.remove(node) for input in user_inputs: self.get_influenced_inputs(input) # Get all influenced nodes if we propagate along the input direction def get_influenced_inputs(self, node: str): if node in self.visited: return self.visited.append(node) inputs = self.dag_ir.get_all_inputs(node) for input in inputs: self.get_influenced_inputs(input) input_users = [] for input in inputs: input_users.append(set(self.dag_ir.get_users(input))) if len(input_users) > 0: input_users = set.union(*input_users) input_users.remove(node) for user in input_users: self.get_influenced_users(user) def add_copy_before(self, layout_node_meta: LayoutNode, target: str): copied_node_meta = deepcopy(layout_node_meta) copied_node = f"{copied_node_meta.name}_copy{self.copy_cnt}" self.copy_cnt += 1 copied_node_meta.name = copied_node self.dag_ir.add_node(copied_node_meta) # Add edges target_inputs = self.dag_ir.get_all_inputs(target) for src in target_inputs: self.dag_ir.remove_edge(src, target) self.dag_ir.add_edge(src, copied_node) self.dag_ir.add_edge(copied_node, target) self.layout_nodes_worklist.append(copied_node) def add_copy_after(self, layout_node_meta: LayoutNode, target: str): copied_node_meta = deepcopy(layout_node_meta) copied_node = f"{copied_node_meta.name}_copy{self.copy_cnt}" self.copy_cnt += 1 copied_node_meta.name = copied_node self.dag_ir.add_node(copied_node_meta) # Add edges users = self.dag_ir.get_users(target) for user in users: self.dag_ir.remove_edge(target, user) self.dag_ir.add_edge(copied_node, user) self.dag_ir.add_edge(target, copied_node) self.layout_nodes_worklist.append(copied_node) # Propagate the layout `node` along the user direction def propagate_to_users(self, layout_node_meta: LayoutNode, node: str): """ Propagate layout node to users """ if node in self.visited: # Avoid applying twice return self.visited.append(node) node_meta = self.dag_ir.get_node_meta(node) if layout_node_meta.name != node: if isinstance(node_meta, LayoutNode): # Layout node is not transparent with layout node self.add_copy_before(layout_node_meta, node) return else: layout_node_meta.apply_to_user(node_meta) users = self.dag_ir.get_users(node) user_inputs = [] for user in users: user_inputs.append(set(self.dag_ir.get_all_inputs(user))) for user in users: self.propagate_to_users(layout_node_meta, user) if len(user_inputs) > 0: user_inputs = set.union(*user_inputs) user_inputs.remove(node) for input in user_inputs: self.propagate_to_inputs(layout_node_meta.get_inverse_node(), input) # Propagate the layout `node` along the input direction def propagate_to_inputs(self, layout_node_meta: LayoutNode, node: str): """ Propagate layout node to inputs """ 
if node in self.visited: # Avoid applying twice return self.visited.append(node) node_meta = self.dag_ir.get_node_meta(node) if layout_node_meta.name != node: if isinstance(node_meta, LayoutNode): # Layout node is not transparent with layout node self.add_copy_after(layout_node_meta, node) return else: layout_node_meta.apply_to_input(node_meta) inputs = self.dag_ir.get_all_inputs(node) input_users = [] for input in inputs: input_users.append(set(self.dag_ir.get_users(input))) for input in inputs: self.propagate_to_inputs(layout_node_meta, input) if len(input_users) > 0: input_users = set.union(*input_users) input_users.remove(node) for user in input_users: self.propagate_to_users(layout_node_meta.get_inverse_node(), user)
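The rewrite above leans on the fact that a layout manipulation (for example a permute or reshape) commutes with purely elementwise computation, which is why a layout node can be pushed away from the accumulator toward the loads or stores. The numpy check below is only an analogy for that commutation, not a demonstration of the pass itself:

import numpy as np

accum = np.random.rand(4, 6).astype(np.float32)
bias = np.random.rand(4, 6).astype(np.float32)

# Applying the layout op after the elementwise op ...
lhs = (accum + bias).T
# ... is equivalent to pushing the layout op onto every input of that op.
rhs = accum.T + bias.T
assert np.allclose(lhs, rhs)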
python/cutlass/backend/evt/passes/pass_layout_elimination.py/0
{ "file_path": "python/cutlass/backend/evt/passes/pass_layout_elimination.py", "repo_id": "python", "token_count": 3853 }
42
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Utility functions for converting between frontend datatypes and CUTLASS datatypes """ import cutlass from cutlass_library import ( DataTypeSize, MathOperation, MathInstruction ) from cutlass.backend.library import ( TileDescription, ) bfloat16_available = None cupy_available = None numpy_available = None torch_available = None _library_to_cupy_dict = None _library_to_numpy_dict = None _library_to_torch_dict = None _torch_to_library_dict = None def is_numpy_available(): global numpy_available, _library_to_numpy_dict if numpy_available is None: try: import numpy as np numpy_available = True _library_to_numpy_dict = { cutlass.DataType.f16: np.float16, cutlass.DataType.f32: np.float32, cutlass.DataType.f64: np.float64, cutlass.DataType.s8: np.int8, cutlass.DataType.s32: np.int32, } except ImportError: numpy_available = False _library_to_numpy_dict = {} return numpy_available def is_numpy_tensor(inp) -> bool: if is_numpy_available(): import numpy as np return isinstance(inp, np.ndarray) return False def numpy_library_type(inp) -> cutlass.DataType: if is_numpy_available(): import numpy as np if inp == np.float16: return cutlass.DataType.f16 elif inp == np.float32: return cutlass.DataType.f32 elif inp == np.float64: return cutlass.DataType.f64 elif inp == np.int8: return cutlass.DataType.s8 elif inp == np.int32: return cutlass.DataType.s32 return None def numpy_type(inp): return _library_to_numpy_dict.get(inp, None) def is_cupy_available(): global cupy_available if cupy_available is None: try: import cupy as cp cupy_available = True _library_to_cupy_dict = { cutlass.DataType.f16: cp.float16, cutlass.DataType.f32: cp.float32, cutlass.DataType.f64: cp.float64, cutlass.DataType.s8: cp.int8, cutlass.DataType.s32: cp.int32, } except 
ImportError: cupy_available = False _library_to_cupy_dict = {} return cupy_available def is_cupy_tensor(inp) -> bool: if is_cupy_available(): import cupy as cp return isinstance(inp, cp.ndarray) return False def cupy_library_type(inp) -> cutlass.DataType: if is_cupy_available(): import cupy as cp if inp == cp.float16: return cutlass.DataType.f16 elif inp == cp.float32: return cutlass.DataType.f32 elif inp == cp.float64: return cutlass.DataType.f64 return None def cupy_type(inp): return _library_to_cupy_dict.get(inp, None) def is_torch_available(): global torch_available, _library_to_torch_dict, _torch_to_library_dict if torch_available is None: try: import torch torch_available = True _torch_to_library_dict = { torch.half: cutlass.DataType.f16, torch.float16: cutlass.DataType.f16, torch.bfloat16: cutlass.DataType.bf16, torch.float: cutlass.DataType.f32, torch.float32: cutlass.DataType.f32, torch.double: cutlass.DataType.f64, torch.float64: cutlass.DataType.f64, torch.int8: cutlass.DataType.s8, torch.int32: cutlass.DataType.s32, torch.uint8: cutlass.DataType.u8, } _library_to_torch_dict = { cutlass.DataType.f16: torch.half, cutlass.DataType.f16: torch.float16, cutlass.DataType.bf16: torch.bfloat16, cutlass.DataType.f32: torch.float, cutlass.DataType.f32: torch.float32, cutlass.DataType.f64: torch.double, cutlass.DataType.f64: torch.float64, cutlass.DataType.s8: torch.int8, cutlass.DataType.s32: torch.int32, cutlass.DataType.u8: torch.uint8, } def possibly_add_type(torch_type_name, cutlass_type): # Only try adding the type if the version of torch being used supports it if hasattr(torch, torch_type_name): torch_type = getattr(torch, torch_type_name) _torch_to_library_dict[torch_type] = cutlass_type _library_to_torch_dict[cutlass_type] = torch_type possibly_add_type("float8_e4m3fn", cutlass.DataType.e4m3) possibly_add_type("float8_e5m2", cutlass.DataType.e5m2) except ImportError: torch_available = False _torch_to_library_dict = {} _library_to_torch_dict = {} return torch_available def is_torch_tensor(inp) -> bool: if is_torch_available(): import torch return isinstance(inp, torch.Tensor) return False def torch_library_type(inp) -> cutlass.DataType: return _torch_to_library_dict.get(inp, None) def torch_type(inp): return _library_to_torch_dict.get(inp, None) def is_bfloat16_available(): global bfloat16_available if bfloat16_available is None: try: import bfloat16 bfloat16_available = True except ImportError: bfloat16_available = False return bfloat16_available def bfloat16_library_type(inp) -> cutlass.DataType: if is_bfloat16_available(): import bfloat16 if inp == bfloat16.bfloat16: return cutlass.DataType.bf16 def bfloat16_type(inp): if is_bfloat16_available(): import bfloat16 if inp == cutlass.DataType.bf16: return bfloat16.bfloat16 def library_type(inp): if inp in DataTypeSize: return inp for cvt_fn in [ bfloat16_library_type, cupy_library_type, numpy_library_type, torch_library_type, ]: out = cvt_fn(inp) if out is not None: return out raise Exception(f"No available conversion from type {inp} to a library type.") def _tensor_from_numpy(np_tensor): dtype = library_type(np_tensor.dtype) if np_tensor.flags.c_contiguous: layout = cutlass.LayoutType.RowMajor elif np_tensor.flags.f_contiguous: layout = cutlass.LayoutType.ColumnMajor return (dtype, layout) def _tensor_from_torch(pt_tensor): dtype = library_type(pt_tensor.dtype) return (dtype, cutlass.LayoutType.RowMajor) def get_datatype_and_layout(tensor): if (is_numpy_tensor(tensor) or is_cupy_tensor(tensor)): return _tensor_from_numpy(tensor) 
elif is_torch_tensor(tensor): return _tensor_from_torch(tensor) elif isinstance(tensor, float) or isinstance(tensor, int): return (cutlass.DataType.f32, cutlass.LayoutType.RowMajor) else: raise Exception(f"Unable to convert tensor of type {type(tensor)} to Python-bound CUTLASS datatype and layout.") def get_tensor_shape(tensor, op="GEMM"): if (is_numpy_tensor(tensor) or is_cupy_tensor(tensor)): return tensor.shape elif is_torch_tensor(tensor): size = tensor.size() if op == "CONV": # PyTorch Tensors have shape NCHW return (size[0], size[2], size[3], size[1]) else: return tuple(tensor.size()) elif isinstance(tensor, float) or isinstance(tensor, int): return (1,) else: raise Exception(f"Unable to convert tensor of type {type(tensor)} to Python-bound CUTLASS datatype and layout.") _math_operation_value_map = {x.value: x for x in MathOperation} def backend_math_operation(math_op: MathOperation): if math_op.value not in _math_operation_value_map.keys(): raise Exception(f"Unable to convert math operation of type {math_op} to backend math operation.") return _math_operation_value_map[math_op.value] def construct_backend_td(td: cutlass.TileDescription, kernel_schedule: cutlass.KernelScheduleType, epilogue_schedule: cutlass.EpilogueScheduleType, tile_scheduler: cutlass.TileSchedulerType) -> TileDescription: mi = td.math_instruction backend_mi = MathInstruction( mi.instruction_shape, mi.element_a, mi.element_b, mi.element_accumulator, mi.opcode_class, backend_math_operation(mi.math_operation) ) cluster_shape = td.cluster_shape if hasattr(td, "cluster_shape") else [1, 1, 1] return TileDescription(td.threadblock_shape, td.stages, td.warp_count, backend_mi, cluster_shape, kernel_schedule, epilogue_schedule, tile_scheduler) def td_from_profiler_op(op) -> TileDescription: """ Converts the profiler's TileDescription in ``op`` into the backend TileDescription :param op: profiler Operation :returns: backend TileDescription :rtype: cutlass.backend.TileDescription """ kschedule = op.kernel_schedule if hasattr(op, 'kernel_schedule') else None eschedule = op.epilogue_schedule if hasattr(op, 'epilogue_schedule') else None tschedule = op.tile_scheduler if hasattr(op, 'tile_scheduler') else None return construct_backend_td(op.tile_description, kschedule, eschedule, tschedule) def td_from_profiler_td(td: TileDescription) -> TileDescription: """ Converts the profiler's TileDescription into the backend TileDescription :param td: profiler TileDescription :type td: cutlass.TileDescription :returns: backend TileDescription :rtype: cutlass.backend.TileDescription """ return construct_backend_td(td, kernel_schedule=None, epilogue_schedule=None, tile_scheduler=None) def to_camel_case(snake_str): return "".join(x.capitalize() for x in snake_str.lower().split("_")) def getattr_enum(obj, attr_name): # The attr_name is under the snake_case camel_attr = to_camel_case(attr_name) if hasattr(obj, camel_attr): return getattr(obj, camel_attr) else: raise Exception(f"Invalid option: {attr_name}")
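A short usage sketch of the conversion helpers above, assuming the cutlass Python package and numpy are installed; the module path cutlass.utils.datatypes is inferred from this file's location:

import numpy as np
from cutlass.utils.datatypes import (
    get_datatype_and_layout,
    get_tensor_shape,
    library_type,
)

A = np.zeros((128, 64), dtype=np.float16)   # C-contiguous, so row-major

dtype, layout = get_datatype_and_layout(A)
# dtype  == cutlass.DataType.f16
# layout == cutlass.LayoutType.RowMajor
print(dtype, layout, get_tensor_shape(A))   # ..., (128, 64)

print(library_type(np.float32))             # cutlass.DataType.f32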
python/cutlass/utils/datatypes.py/0
{ "file_path": "python/cutlass/utils/datatypes.py", "repo_id": "python", "token_count": 5250 }
43
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Functions for manipulating IntTuples """ from functools import reduce from itertools import chain from typing import Union from .typing import Integer def is_int(x): return isinstance(x, Integer) def is_tuple(x): return isinstance(x, tuple) def flatten(t): if is_tuple(t): if len(t) == 0: return () else: return tuple(i for a in t for i in flatten(a)) else: return (t,) def signum(a): return bool(a > 0) - bool(a < 0) def product(a): if is_tuple(a): return reduce(lambda val,elem : val*product(elem), a, 1) else: return a def inner_product(a, b): if is_tuple(a): # tuple tuple assert len(a) == len(b) return sum(inner_product(x,y) for x,y in zip(a,b)) else: # "int" "int" assert not is_tuple(b) return a * b def tuple_max(a): if is_tuple(a): return max(tuple_max(x) for x in a) else: return a def elem_scale(a, b): if is_tuple(a): if is_tuple(b): # tuple tuple assert len(a) == len(b) return tuple(elem_scale(x,y) for x,y in zip(a,b)) else: # tuple "int" assert False # Error else: if is_tuple(b): # "int" tuple return elem_scale(a, product(b)) else: # "int" "int" return a * b # Inclusive prefix ceil div with output congruent to input a def shape_div(a, b): if is_tuple(a): if is_tuple(b): # tuple tuple assert len(a) == len(b) return tuple(shape_div(x,y) for x,y in zip(a,b)) else: # tuple "int" #r = [shape_div(a[0],b)] + [shape_div(a[i],b := shape_div(b, product(a[i-1]))) for i in range(1,len(a))] r = [] for v in a: r.append(shape_div(v,b)) b = shape_div(b,product(v)) return tuple(r) else: if is_tuple(b): # "int" tuple return shape_div(a, product(b)) else: # "int" "int" assert a % b == 0 or b % a == 0 #return -(-a // b) # Python exclusive impl: "//" is always floor div if a % b == 0: return a // b else: return signum(a*b) # Exclusive prefix 
product with output congruent to input a def prefix_product(a, init=1): if is_tuple(a): if is_tuple(init): # tuple tuple assert len(a) == len(init) return tuple(prefix_product(x,i) for x,i in zip(a,init)) else: # tuple "int" #r = [prefix_product(a[0],init)] + [prefix_product(a[i],init := init * product(a[i-1])) for i in range(1,len(a))] r = [] for v in a: r.append(prefix_product(v,init)) init = init * product(v) return tuple(r) else: if is_tuple(init): # "int" tuple assert False # Error else: # "int" "int" return init def idx2crd(idx, shape, stride=None): if stride is None: stride = prefix_product(shape) if is_tuple(idx): if is_tuple(shape): # tuple tuple tuple assert len(idx) == len(shape) and len(idx) == len(stride) return tuple(idx2crd(i, s, d) for i, s, d in zip(idx,shape,stride)) else: # tuple "int" "int" assert False # Error else: if is_tuple(shape): # "int" tuple tuple assert len(shape) == len(stride) return tuple(idx2crd(idx, s, d) for s,d in zip(shape,stride)) else: # "int" "int" "int" return (idx // stride) % shape def crd2idx(crd, shape, stride=None): if stride is None: stride = prefix_product(shape) if is_tuple(crd): if is_tuple(shape): # tuple tuple tuple assert len(crd) == len(shape) and len(crd) == len(stride) return sum(crd2idx(c, s, d) for c, s, d in zip(crd, shape, stride)) else: # tuple "int" "int" assert False, f"crd={crd}, shape={shape}" # Error else: if crd is None: crd = 0 if is_tuple(shape): # "int" tuple tuple assert len(shape) == len(stride) result = 0 for i in range(len(shape)-1): result += crd2idx(crd % product(shape[i]), shape[i], stride[i]) crd = crd // product(shape[i]) return result + crd2idx(crd, shape[-1], stride[-1]) else: # "int" "int" "int" return crd * stride # Transform crd into the dst_shape's iteration space def crd2crd(crd, dst_shape, src_shape=None): if is_tuple(crd): if is_tuple(dst_shape): # tuple tuple assert len(crd) == len(dst_shape) return tuple(crd2crd(x, y) for x, y in zip(crd,dst_shape)) else: # tuple "int" # Ambiguous unless we have src_shape assert src_shape is not None return crd2idx(crd, src_shape) else: if is_tuple(dst_shape): # "int" tuple return idx2crd(crd, dst_shape) else: # "int" "int" assert crd < dst_shape return crd # Filter trg according to crd: keep only elements of trg that are paired with None def slice_(crd: Union[None, tuple, int], trg: Union[tuple, int]): if is_tuple(crd): if is_tuple(trg): # tuple tuple assert len(crd) == len(trg) # match C++ behavior of `filter_tuple` using `tuple_cat(...)` return tuple(chain(*filter(lambda x: x != (), [slice_(c, s) for c, s in zip(crd, trg)]))) else: assert False # tuple "int" : Error elif crd is None: # match C++ behavior `return cute::tuple<B>{b};` return (trg,) else: return () # Determine if None appears at any of an int_tuples' terminals def has_none(a: Union[None, tuple, int]): if is_tuple(a): return any(has_none(v) for v in a) else: return a is None
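A quick round-trip example of the helpers above, assuming the pycute package from this repository is importable. Strides default to the exclusive prefix products of the shape, and nested tuples are handled congruently:

from pycute.int_tuple import crd2idx, flatten, idx2crd, prefix_product, product

shape = (2, (3, 4))                 # nested int-tuple shape
print(flatten(shape))               # (2, 3, 4)
print(product(shape))               # 24 elements in total

stride = prefix_product(shape)
print(stride)                       # (1, (2, 6)), congruent to `shape`

crd = (1, (2, 3))
idx = crd2idx(crd, shape)           # 1*1 + 2*2 + 3*6 = 23
assert idx == 23
assert idx2crd(idx, shape) == crd   # maps back to the same coordinate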
python/pycute/int_tuple.py/0
{ "file_path": "python/pycute/int_tuple.py", "repo_id": "python", "token_count": 3526 }
44
################################################################################ # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################ """ Unit test for load nodes in SM90 """ import logging import unittest import cutlass from cutlass.backend import * from cutlass.epilogue import * from utils.evt_testbed import EVTTestBed, EVTTestCaseBase cutlass.set_log_level(logging.WARNING) @unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]") class TestEVTLoad(EVTTestCaseBase): def test_tensor_load(self): """ Load extra tensor with shape [m, n] """ def evt_tensor_load(accum, C, aux, aux_batch): D = accum + C + aux + aux_batch return D for m, n, k, l in self.get_problem_sizes(8): example_inputs = { "accum": self.fake_tensor(self.element, (l, m, n)), "C": self.fake_tensor(self.element, (l, m, n)), "aux": self.fake_tensor(self.element, (m, n)), "aux_batch": self.fake_tensor(np.float32, (l, m, n)), "D": self.fake_tensor(self.element, (l, m, n)), } launcher = EVTTestBed(self.element, evt_tensor_load, example_inputs) input_keys = ["C", "aux", "aux_batch"] result_keys = ["D"] launcher.verify((m, n, k), input_keys, result_keys, l) def test_row_broadcast(self): """ Load extra tensor with shape [1, n] """ def evt_row_broadcast(accum, C, bias, bias_batch): D = accum + C + bias + bias_batch return D for m, n, k, l in self.get_problem_sizes(8): example_inputs = { "accum": self.fake_tensor(self.element, (l, m, n)), "C": self.fake_tensor(self.element, (l, m, n)), "bias": self.fake_tensor(self.element, (n,)), "bias_batch": self.fake_tensor(np.float32, (l, 1, n)), "D": self.fake_tensor(self.element, (l, m, n)), } launcher = EVTTestBed(self.element, evt_row_broadcast, example_inputs) input_keys = ["C", "bias", "bias_batch"] result_keys = ["D"] launcher.verify((m, n, k), input_keys, result_keys, l) def test_column_broadcast(self): """ Load extra tensor with shape [m, 
1] """ def evt_column_broadcast(accum, C, bias, bias_batch): D = accum + C + bias + bias_batch return D for m, n, k, l in self.get_problem_sizes(8): example_inputs = { "accum": self.fake_tensor(self.element, (l, m, n)), "C": self.fake_tensor(self.element, (l, m, n)), "bias": self.fake_tensor(self.element, (m, 1)), "bias_batch": self.fake_tensor(np.float32, (l, m, 1)), "D": self.fake_tensor(self.element, (l, m, n)), } launcher = EVTTestBed(self.element, evt_column_broadcast, example_inputs) input_keys = ["C", "bias", "bias_batch"] result_keys = ["D"] launcher.verify((m, n, k), input_keys, result_keys, l) def test_scalar_broadcast(self): """ Load extra tensor with shape [1, 1] """ def evt_scalar_broadcast(accum, C, alpha, alpha_batch): D = accum + C + alpha + alpha_batch return D for m, n, k, l in self.get_problem_sizes(8): example_inputs = { "accum": self.fake_tensor(self.element, (l, m, n)), "C": self.fake_tensor(self.element, (l, m, n)), "alpha": 0.5, "alpha_batch": self.fake_tensor(np.float32, (l, 1, 1)), "D": self.fake_tensor(self.element, (l, m, n)), } launcher = EVTTestBed(self.element, evt_scalar_broadcast, example_inputs) input_keys = ["C", "alpha", "alpha_batch"] result_keys = ["D"] launcher.verify((m, n, k), input_keys, result_keys, l) if __name__ == '__main__': unittest.main()
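The reference semantics these load/broadcast tests exercise are ordinary numpy-style broadcasting of the extra operands onto the [l, m, n] accumulator. A standalone sketch, with shapes mirroring the example inputs above and arbitrary values:

import numpy as np

l, m, n = 2, 128, 128
accum = np.random.rand(l, m, n).astype(np.float32)
C = np.random.rand(l, m, n).astype(np.float32)

bias_row = np.random.rand(n).astype(np.float32)      # as in test_row_broadcast
bias_col = np.random.rand(m, 1).astype(np.float32)   # as in test_column_broadcast
alpha = 0.5                                          # as in test_scalar_broadcast

D_row = accum + C + bias_row      # [n] broadcasts across rows and batches
D_col = accum + C + bias_col      # [m, 1] broadcasts across columns and batches
D_scalar = accum + C + alpha      # scalar broadcasts everywhere
assert D_row.shape == D_col.shape == D_scalar.shape == (l, m, n)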
test/python/cutlass/evt/evt_load_sm80_90.py/0
{ "file_path": "test/python/cutlass/evt/evt_load_sm80_90.py", "repo_id": "test", "token_count": 2528 }
45
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Implicit GEMM testbed */ #pragma once #include <fstream> #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/reduction/device/reduce_split_k.h" #include "cutlass/reduction/thread/reduction_operators.h" #include "conv2d_problems.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/tensor_compare.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/host_reorder.h" #include "cutlass/util/reference/host/convolution.h" #include "cutlass/util/reference/device/convolution.h" #include "cutlass/core_io.h" #include "cutlass/util/tensor_view_io.h" #include "../cache_testbed_output.h" namespace test { namespace conv { namespace device { template <typename Conv2d, int InterleavedK> class InterleavedTestbedConv2d { public: using ElementA = typename Conv2d::ElementA; using LayoutA = typename Conv2d::LayoutA; using ElementB = typename Conv2d::ElementB; using LayoutB = typename Conv2d::LayoutB; using ElementC = typename Conv2d::ElementC; using LayoutC = typename Conv2d::LayoutC; using ElementAccumulator = typename Conv2d::ElementAccumulator; using ElementCompute = typename Conv2d::ElementCompute; using EpilogueOutputOp = typename Conv2d::EpilogueOutputOp; static cutlass::conv::Operator const kConvolutionalOperator = Conv2d::kConvolutionalOperator; /// Reduction kernel using ReductionOp = cutlass::reduction::thread::ReduceAdd< ElementAccumulator, typename EpilogueOutputOp::ElementAccumulator, EpilogueOutputOp::kCount >; using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK< cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, EpilogueOutputOp, ReductionOp >; using ReductionDevice = cutlass::reduction::device::ReduceSplitK<ReductionKernel>; using ReductionStrideIndex = typename ReductionDevice::StrideIndex; public: /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint64_t seed; cutlass::HostTensor<ElementA, LayoutA> tensor_A; cutlass::HostTensor<ElementB, LayoutB> tensor_B; cutlass::HostTensor<ElementB, LayoutB> tensor_B_reordered; cutlass::HostTensor<ElementC, LayoutC> tensor_C; cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed; cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference; public: InterleavedTestbedConv2d( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> void initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { int scope; int bits = cutlass::sizeof_bits<Element>::value; if (bits <= 8) { scope = 2; } else if (bits == 16) { scope = 3; } else { scope = 8; } cutlass::reference::host::TensorFillRandomUniform( view, seed, scope, -scope, 0); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); } else if (dist_kind == cutlass::Distribution::Sequential) { 
cutlass::reference::host::BlockFillSequential(view.data(), view.capacity()); } else { } } void initialize( cutlass::conv::Conv2dProblemSize const &problem_size, uint64_t seed = 2019) { tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size)); tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size)); tensor_B_reordered.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size)); tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size)); tensor_D_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size)); tensor_D_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size)); initialize_tensor(tensor_A.host_view(), init_A, seed); initialize_tensor(tensor_B.host_view(), init_B, seed * 17); initialize_tensor(tensor_C.host_view(), init_C, seed * 39); cutlass::reorder_convK<InterleavedK>( tensor_B_reordered.host_ref(), tensor_B.host_ref(), implicit_gemm_problem_size(kConvolutionalOperator, problem_size)); tensor_A.sync_device(); tensor_B.sync_device(); tensor_B_reordered.sync_device(); tensor_C.sync_device(); tensor_D_computed.sync_device(); tensor_D_reference.sync_device(); } bool sufficient() const { // // Determine SMEM requirements and waive if not satisfied // size_t smem_size = sizeof(typename Conv2d::UnderlyingKernel::SharedStorage); cudaDeviceProp properties; int device_idx; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDevice() API call failed."); } result = cudaGetDeviceProperties(&properties, device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProperties() failed"); } if (properties.sharedMemPerMultiprocessor < smem_size) { return false; } return true; } /// Executes one test bool run( cutlass::conv::Conv2dProblemSize const &problem_size, cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial, ElementCompute alpha = ElementCompute(1), ElementCompute beta = ElementCompute(0)) { // Waive test if insufficient CUDA device if (!sufficient()) { if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { std::cerr << "Test waived due to insufficient CUDA device." << std::endl; } return true; } #if 0 //display conv2d problem size for debugging std::cout << problem_size << std::endl << "alpha, beta: (" << float(alpha) << ", " << float(beta) << ")" << std::endl << "split_k_mode: " << ((split_k_mode == cutlass::conv::SplitKMode::kSerial) ? 
"(serial)" : "(parallel)") << std::endl << std::endl; #endif initialize(problem_size); // configure the operator Conv2d conv2d_op; typename Conv2d::Arguments conv2d_args( problem_size, tensor_A.device_ref(), tensor_B_reordered.device_ref(), tensor_C.device_ref(), tensor_D_computed.device_ref(), {alpha, beta}, split_k_mode ); // find workspace requirement for parallel split-k reduction size_t workspace_size = Conv2d::get_workspace_size(conv2d_args); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); cutlass::Status status = conv2d_op.initialize(conv2d_args, workspace.get()); // conv2d operation with parallel split-k-mode if (split_k_mode == cutlass::conv::SplitKMode::kParallel) { // conv2d output is written to workspace in global memory conv2d_args.ref_D.reset(reinterpret_cast<ElementC*>(workspace.get())); // accumulate mma for each cta in k-dimension (1.0 * A * B) conv2d_args.output_op = {ElementCompute(1), ElementCompute(0)}; // update conv2d operator arguments status = conv2d_op.update(conv2d_args, workspace.get()); } EXPECT_TRUE(status == cutlass::Status::kSuccess); if (status != cutlass::Status::kSuccess) { return false; } // run conv2d operator status = conv2d_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess); if (status != cutlass::Status::kSuccess) { return false; } if (split_k_mode == cutlass::conv::SplitKMode::kParallel) { // configure parallel reduction operator ReductionDevice reduction_op; typename ReductionDevice::Arguments reduction_args( cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(), problem_size.split_k_slices, cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, problem_size), { reinterpret_cast<ElementAccumulator*> (workspace.get()), ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx]) }, { tensor_D_computed.device_data(), ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx]) }, { tensor_C.device_data(), ReductionStrideIndex(tensor_C.stride()[Conv2d::UnderlyingKernel::kTensorCStrideIdx]) }, // apply alpha, beta to obtain the following equation alpha * ReduceAdd(A * B) + beta * C {alpha, beta} ); status = reduction_op.initialize(reduction_args, nullptr); EXPECT_TRUE(status == cutlass::Status::kSuccess); if (status != cutlass::Status::kSuccess) { return false; } // run prallel reduction kernel status = reduction_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess); if (status != cutlass::Status::kSuccess) { return false; } } bool passed = false; tensor_D_computed.sync_host(); // // Reference check - support caching results // CachedTestKey cached_test_key = CreateCachedConv2dTestKey< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator, ElementCompute >( kConvolutionalOperator, problem_size, alpha, beta, tensor_A.host_view(), tensor_B.host_view(), tensor_C.host_view() ); // // Look for the cached key // bool cached_result_loaded = false; CachedTestResult cached_test_result; std::string conv2d_result_cache_name = std::string("cached_results_") + CUTLASS_TARGET_NAME + ".txt"; if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) { CachedTestResultListing cached_results(conv2d_result_cache_name); auto cached = cached_results.find(cached_test_key); cached_result_loaded = cached.first; if (cached_result_loaded) { cached_test_result = cached.second; } } if (!cached_result_loaded) { #if CUTLASS_CONV_TEST_UNIT_REFERENCE_DEVICE_ENABLED cutlass::reference::device::Conv2d< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, 
ElementCompute, ElementAccumulator, cutlass::NumericConverterClamp<ElementC, ElementCompute> >( kConvolutionalOperator, problem_size, tensor_A.device_ref(), tensor_B.device_ref(), tensor_C.device_ref(), tensor_D_reference.device_ref(), alpha, beta); cudaError_t result = cudaDeviceSynchronize(); EXPECT_EQ(result, cudaSuccess) << " device reference error: " << cudaGetErrorString(result); // sync host (copy device data to host) for dumping error output in case of mismatches tensor_D_reference.sync_host(); #else cutlass::reference::host::Conv2d< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementCompute, ElementAccumulator, ElementC, cutlass::NumericConverterClamp<ElementC, ElementCompute> >( kConvolutionalOperator, problem_size, tensor_A.host_ref(), tensor_B.host_ref(), tensor_C.host_ref(), tensor_D_reference.host_ref(), alpha, beta); #endif if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) { cached_test_result.D = TensorHash(tensor_D_reference.host_view()); CachedTestResultListing cached_results(conv2d_result_cache_name); cached_results.append(cached_test_key, cached_test_result); cached_results.write(conv2d_result_cache_name); } } // if (!cached_result_loaded) uint32_t tensor_D_hash = TensorHash(tensor_D_computed.host_view()); if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) { passed = (tensor_D_hash == cached_test_result.D); EXPECT_EQ(tensor_D_hash, cached_test_result.D) << "Hash-based comparison failed for key:" << "\n" << cached_test_key << "\n"; } else { passed = cutlass::reference::host::TensorEquals( tensor_D_computed.host_view(), tensor_D_reference.host_view()); } EXPECT_TRUE(passed); if (!passed) { std::stringstream fname; fname << "error_Conv2d_ImplicitGemm_device_" << (split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial_reduction_" : "parallel_reduction_") << (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kFprop ? "fprop_" : (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ? "dgrad_" : "wgrad_")) << "ncxhwx_" << problem_size.N << "x" << problem_size.H << "x" << problem_size.W << "x" << problem_size.C << "_cxrskx_" << problem_size.K << "x" << problem_size.R << "x" << problem_size.S << "x" << problem_size.C << "_padding_" << problem_size.pad_h << "x" << problem_size.pad_w << "_stride_" << problem_size.stride_h << "x" << problem_size.stride_w << "_dilation_" << problem_size.dilation_h << "x" << problem_size.dilation_w << "_" << (problem_size.mode == cutlass::conv::Mode::kCrossCorrelation ? 
"xcorr_" : "conv_") << Conv2d::ThreadblockShape::kM << "x" << Conv2d::ThreadblockShape::kN << "x" << Conv2d::ThreadblockShape::kK << "_" << Conv2d::WarpShape::kM << "x" << Conv2d::WarpShape::kN << "x" << Conv2d::WarpShape::kK << ".txt"; std::cout << fname.str() << std::endl; std::ofstream results(fname.str()); results << problem_size << std::endl; results << "\nA:\n" << tensor_A.host_view() << "\n" << "\nB:\n" << tensor_B.host_view() << "\n" << "\nC:\n" << tensor_C.host_view() << "\n"; results << "\nD reference (hash: " << cached_test_result.D << ")\n"; if (!cached_result_loaded) { results << tensor_D_reference.host_view() << "\n"; } results << "\nD computed (hash: " << tensor_D_hash << ")\n" << tensor_D_computed.host_view() << "\n"; } return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////////////// // TestAllConv: Runs cutlass::conv::device::ImplicitGemmConvolution operator and compares it with reference // TestAllConv runs conv operator on default conv problem sizes from test::conv::device::TestbedConv2dProblemSizes // Additionally, each conv2d test can provide conv problem sizes (conv_test_sizes) and blacklist of sizes // (conv_blacklist_sizes) ///////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename ImplicitGemm, int InterleavedK> bool TestAllInterleavedConv2d( const Conv2dProblemVector & conv_test_sizes = Conv2dProblemVector(), const Conv2dProblemVector & conv_blacklist_sizes = Conv2dProblemVector()) { bool passed = true; // // Testbed object // InterleavedTestbedConv2d<ImplicitGemm, InterleavedK> testbed; // // Get conv problem sizes to run conv operator // TestbedConv2dProblemSizes conv_problems(InterleavedK); // minimum channel size must be multiple of InterleavedK for interleaved layout // Vector of conv2d problem sizes to avoid duplicate runs Conv2dProblemVector conv_tested_sizes; Conv2dProblemVector const *problem_vectors[] = { &conv_test_sizes, // run user specified sizes &conv_problems.conv2d_default_sizes, // run default and cudnn bug sizes &conv_problems.conv2d_resnet50_sizes, // run resnet50 sizes #if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED &conv_problems.conv2d_rigorous_sizes, // run large and rigorous sizes if enabled #endif }; // Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0) for (Conv2dProblemVector const * problem_vector : problem_vectors) { ChannelDivisibilitySpecification channel_spec(InterleavedK); //input and output channels must be multiple of InterleavedK auto pruned_problem_vector = prune(*problem_vector, channel_spec); // Run conv testbed on default convolution sizes for(auto conv_problem : pruned_problem_vector) { // Skip blacklist and avoid duplicate problem sizes if (std::find(conv_blacklist_sizes.begin(), conv_blacklist_sizes.end(), conv_problem) != conv_blacklist_sizes.end() || std::find(conv_tested_sizes.begin(), conv_tested_sizes.end(), conv_problem) != conv_tested_sizes.end()) { continue; } // // Procedurally disable certain cases // // CUTLASS DGRAD's unity stride specialization only support stride {1, 1} if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad) && (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport == cutlass::conv::StrideSupport::kUnity)) { if (!((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) { continue; } } // // Test // // push back tested problem size to avoid re-running duplicates 
conv_tested_sizes.push_back(conv_problem); // test mode = cross correlation passed = testbed.run( conv_problem, cutlass::conv::SplitKMode::kSerial); if (!passed) { return false; } // test mode = convolution passed = testbed.run( conv_problem.reset_mode(cutlass::conv::Mode::kConvolution), cutlass::conv::SplitKMode::kSerial); if (!passed) { return false; } } } #if 0 // Sweep split-k-slice using serial and parallel reduction with non-unity alpha and non-zero beta for // a single conv2d problem size. Convolution unit tests take a long time to run, so only sweep parameters // which are absolutely necessary to catch functional bugs. The code below does provide the option to sweep // alpha and beta for local testing, but only runs one value for alpha and beta. cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size ( {1, 17, 11, 288}, // input size (NHWC) {160, 3, 3, 288}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {1, 1}, // stride (stride_h, stride_w) {1, 1} // dilation (dilation_h, dilation_w) ); cutlass::conv::SplitKMode split_k_modes [] = { cutlass::conv::SplitKMode::kSerial, cutlass::conv::SplitKMode::kParallel, }; int split_k_slices[] = { 1, 2, 3, 4, 201 }; double problem_alpha[] = { 2.0 }; double problem_beta[] = { 2.0 }; for (auto split_k_mode : split_k_modes) { for (auto split_k_slice : split_k_slices) { for (auto alpha : problem_alpha) { for (auto beta : problem_beta) { passed = testbed.run( conv2d_split_k_test_size.reset_split_k_slices(split_k_slice), split_k_mode, cutlass::from_real<typename ImplicitGemm::ElementCompute>(alpha), cutlass::from_real<typename ImplicitGemm::ElementCompute>(beta)); if (!passed) { return false; } } } } } #endif return passed; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace conv } // namespace test
test/unit/conv/device/conv2d_testbed_interleaved.h/0
{ "file_path": "test/unit/conv/device/conv2d_testbed_interleaved.h", "repo_id": "test", "token_count": 8832 }
46
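A minimal usage sketch, not part of the file above: it shows how a unit test would typically drive the harness declared in conv2d_testbed_interleaved.h. The operator type Conv2dFpropInterleaved is a hypothetical placeholder assumed to be defined elsewhere (for example, a cutlass::conv::device::ImplicitGemmConvolution specialization over an interleaved int8 kernel); only TestAllInterleavedConv2d and its signature are taken from the file itself.

// Sketch only: Conv2dFpropInterleaved is a hypothetical, externally defined
// device-level convolution whose A/B/C layouts use an interleaving factor of 32.
#include "conv2d_testbed_interleaved.h"

template <typename Conv2dFpropInterleaved>
bool RunInterleavedConv2dSmokeTest() {
  // InterleavedK (here 32) must match the interleaving factor of the kernel's layouts.
  // This sweeps the default and ResNet-50 problem lists with serial split-k,
  // alpha = 1 and beta = 0, exactly as TestAllInterleavedConv2d does above.
  return test::conv::device::TestAllInterleavedConv2d<Conv2dFpropInterleaved, 32>();
}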
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide Depthwise Direct Conv interface */ #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_depthwise_fprop.h" #include "cutlass/conv/device/direct_convolution.h" #include "conv2d_testbed.h" #include "depthwise_conv2d_direct_conv_testbed.h" std::vector<cutlass::conv::Conv2dProblemSize> DepthwiseFpropProblemSizes_filter3x3() { std::vector<cutlass::conv::Conv2dProblemSize> problems; for (int channels = 16; channels <= 512; channels += 16) { problems.push_back(cutlass::conv::Conv2dProblemSize( {1, 8, 8, channels}, // input size (NHWC) {channels, 3, 3, 1}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {1, 1}, // stride (stride_h, stride_w) {1, 1}, // dilation (dilation_h, dilation_w) cutlass::conv::Mode::kCrossCorrelation, // Convolution mode 16, // split_k_slices channels // groups )); // if(channels == 512 || channels == 16*14) problems.push_back(cutlass::conv::Conv2dProblemSize( {1, 16, 16, channels}, // input size (NHWC) {channels, 3, 3, 1}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {2, 2}, // stride (stride_h, stride_w) {2, 2}, // dilation (dilation_h, dilation_w) cutlass::conv::Mode::kCrossCorrelation, // Convolution mode 16, // split_k_slices channels // groups )); } return problems; } std::vector<cutlass::conv::Conv2dProblemSize> DepthwiseFpropProblemSizes_filter5x5() { std::vector<cutlass::conv::Conv2dProblemSize> problems; for (int channels = 16; channels < 256; channels += 16) { problems.push_back(cutlass::conv::Conv2dProblemSize( {1, 16, 16, channels}, // input size (NHWC) {channels, 5, 5, 1}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {1, 1}, // stride (stride_h, stride_w) {1, 1}, // dilation (dilation_h, dilation_w) cutlass::conv::Mode::kCrossCorrelation, // Convolution mode 16, // split_k_slices channels // groups )); problems.push_back(cutlass::conv::Conv2dProblemSize( {1, 112, 112, channels}, // input size (NHWC) {channels, 5, 5, 1}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {1, 1}, // stride (stride_h, stride_w) {1, 1}, // dilation (dilation_h, dilation_w) cutlass::conv::Mode::kCrossCorrelation, // Convolution mode 16, // split_k_slices channels // groups )); problems.push_back(cutlass::conv::Conv2dProblemSize( {1, 112, 112, channels}, // input size (NHWC) {channels, 5, 5, 1}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {2, 2}, // stride (stride_h, stride_w) {2, 2}, // dilation (dilation_h, dilation_w) cutlass::conv::Mode::kCrossCorrelation, // Convolution mode 16, // split_k_slices channels // groups )); } return problems; } std::vector<cutlass::conv::Conv2dProblemSize> DepthwiseFpropProblemSizes_filter5x37() { std::vector<cutlass::conv::Conv2dProblemSize> problems; for (int channels = 16; channels < 256; channels += 16) { problems.push_back(cutlass::conv::Conv2dProblemSize( {1, 128, 128, channels}, // input size (NHWC) {channels, 5, 37, 1}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {1, 1}, // stride (stride_h, stride_w) {1, 1}, // dilation (dilation_h, dilation_w) cutlass::conv::Mode::kCrossCorrelation, // Convolution mode 108, // split_k_slices channels // groups )); } return problems; } //////////////////////////////////////////////////////////////////////////////// TEST( SM60_Device_Depthwise_conv2d_Fprop_Direct_Conv_Optimized_f16nhwc_f16nhwc_f16nhwc_simt_f16, 64x32_4_8x32_3x3) { using ElementInputA = cutlass::half_t; using ElementInputB 
= cutlass::half_t; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementComputeEpilogue = cutlass::half_t; using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU // SM using MMAOp = cutlass::arch::OpClassSimt; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm60; // This code section describes the groups a thread block will compute constexpr int groups_per_cta = 32; // This code section describes the output tile <N, P, Q, C> a thread block will compute using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, groups_per_cta>; // This code section describes the filter shape <R, S> using FilterShape = cutlass::MatrixShape<3, 3>; // Threadblock tile shape using ThreadblockShape = cutlass::gemm::GemmShape<ThreadBlockOutputShape::kNHW, groups_per_cta, FilterShape::kCount>; // This code section describes tile size a warp will computes using WarpShape = cutlass::gemm::GemmShape<8, groups_per_cta, FilterShape::kCount>; // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle< 1, ThreadBlockOutputShape::kN, ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW>; // Number of pipelines you want to use constexpr int NumStages = 4; // This code section describe iterator algorithm selected is Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; constexpr int kEpilogueElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. kEpilogueElementsPerAccess, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. 
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue, // Data type for alpha/beta in linear combination cutlass::epilogue::thread::ScaleType::Default>; using DepthwiseDirect2dConv = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConvFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, ThreadBlockOutputShape, FilterShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm, cutlass::conv::StrideSupport::kStrided>::Kernel; using Direct2dConv = cutlass::conv::device::DirectConvolution<DepthwiseDirect2dConv>; /// Run all unit test sizes with device-level Conv2d instance EXPECT_TRUE(test::conv::device::TestSpecificDepthwiseDirectConv2d<Direct2dConv>( DepthwiseFpropProblemSizes_filter3x3())); } //////////////////////////////////////////////////////////////////////////////// TEST( SM60_Device_Depthwise_conv2d_Fprop_Direct_Conv_Optimized_f16nhwc_f16nhwc_f16nhwc_simt_f16, 64x64_3_16x64_5x5) { using ElementInputA = cutlass::half_t; using ElementInputB = cutlass::half_t; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementComputeEpilogue = cutlass::half_t; using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU // SM using MMAOp = cutlass::arch::OpClassSimt; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm60; // This code section describes the groups a thread block will compute constexpr int groups_per_cta = 64; // This code section describes the output tile <N, P, Q, C> a thread block will compute using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, groups_per_cta>; // This code section describes the filter shape <R, S> using FilterShape = cutlass::MatrixShape<5, 5>; // Threadblock tile shape using ThreadblockShape = cutlass::gemm::GemmShape<ThreadBlockOutputShape::kNHW, groups_per_cta, FilterShape::kCount>; // This code section describes tile size a warp will computes using WarpShape = cutlass::gemm::GemmShape<16, groups_per_cta, FilterShape::kCount>; // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle< 1, ThreadBlockOutputShape::kN, ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW>; // Number of pipelines you want to use constexpr int NumStages = 3; // This code section describe iterator algorithm selected is Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; constexpr int kEpilogueElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. kEpilogueElementsPerAccess, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. 
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue, // Data type for alpha/beta in linear combination cutlass::epilogue::thread::ScaleType::Default>; using DepthwiseDirect2dConv = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConvFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, ThreadBlockOutputShape, FilterShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm, cutlass::conv::StrideSupport::kStrided>::Kernel; using Direct2dConv = cutlass::conv::device::DirectConvolution<DepthwiseDirect2dConv>; /// Run all unit test sizes with device-level Conv2d instance EXPECT_TRUE(test::conv::device::TestSpecificDepthwiseDirectConv2d<Direct2dConv>( DepthwiseFpropProblemSizes_filter5x5())); } #if 0 //////////////////////////////////////////////////////////////////////////////// TEST( SM60_Device_Depthwise_conv2d_Fprop_Direct_Conv_Optimized_f16nhwc_f16nhwc_f16nhwc_simt_f16, 64x32_3_16x32_5x37) { using ElementInputA = cutlass::half_t; using ElementInputB = cutlass::half_t; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementComputeEpilogue = cutlass::half_t; using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU // SM using MMAOp = cutlass::arch::OpClassSimt; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm60; // This code section describes the groups a thread block will compute constexpr int groups_per_cta = 32; // This code section describes the output tile <N, P, Q, C> a thread block will compute using ThreadBlockOutputShape = cutlass::conv::TensorNHWCShape<1, 8, 8, groups_per_cta>; // This code section describes the filter shape <R, S> using FilterShape = cutlass::MatrixShape<5, 37>; // Threadblock tile shape using ThreadblockShape = cutlass::gemm::GemmShape<ThreadBlockOutputShape::kNHW, groups_per_cta, FilterShape::kCount>; // This code section describes tile size a warp will computes using WarpShape = cutlass::gemm::GemmShape<16, groups_per_cta, FilterShape::kCount>; // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle< 1, ThreadBlockOutputShape::kN, ThreadBlockOutputShape::kH, ThreadBlockOutputShape::kW>; // Number of pipelines you want to use constexpr int NumStages = 2; // This code section describe iterator algorithm selected is Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; constexpr int kEpilogueElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. kEpilogueElementsPerAccess, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. 
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue, // Data type for alpha/beta in linear combination cutlass::epilogue::thread::ScaleType::Default>; using DepthwiseDirect2dConv = typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConvFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, ThreadBlockOutputShape, FilterShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm, cutlass::conv::StrideSupport::kStrided>::Kernel; using Direct2dConv = cutlass::conv::device::DirectConvolution<DepthwiseDirect2dConv>; /// Run all unit test sizes with device-level Conv2d instance EXPECT_TRUE(test::conv::device::TestSpecificDepthwiseDirectConv2d<Direct2dConv>( DepthwiseFpropProblemSizes_filter5x37())); } #endif
test/unit/conv/device/depthwise_conv2d_fprop_direct_conv_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu/0
{ "file_path": "test/unit/conv/device/depthwise_conv2d_fprop_direct_conv_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu", "repo_id": "test", "token_count": 7430 }
47
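A hypothetical extension sketch, not part of the test above: it adds another problem-size generator that follows the same Conv2dProblemSize constructor pattern used by the existing filter3x3/filter5x5 generators. The 32x32 input extent and the split_k_slices value of 16 simply mirror the existing 3x3 cases and are assumptions here, not values from the original file.

// Sketch only: an additional depthwise problem-size list in the style of the generators above.
std::vector<cutlass::conv::Conv2dProblemSize> DepthwiseFpropProblemSizes_filter3x3_large_input() {
  std::vector<cutlass::conv::Conv2dProblemSize> problems;

  for (int channels = 16; channels <= 512; channels += 16) {
    problems.push_back(cutlass::conv::Conv2dProblemSize(
        {1, 32, 32, channels},                   // input size  (NHWC)
        {channels, 3, 3, 1},                     // filter size (KRSC)
        {1, 1, 1, 1},                            // padding     (pad_h, _, pad_w, _)
        {1, 1},                                  // stride      (stride_h, stride_w)
        {1, 1},                                  // dilation    (dilation_h, dilation_w)
        cutlass::conv::Mode::kCrossCorrelation,  // convolution mode
        16,                                      // split_k_slices (mirrors the 3x3 cases above)
        channels                                 // groups (depthwise: groups == channels)
    ));
  }

  return problems;
}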
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "../common/cutlass_unit_test.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/matrix.h" //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(TensorRef, basic_rank2) { int const M = 8; int const N = 16; int matrix_data[M * N] = {0}; cutlass::TensorRef< int, cutlass::IdentityTensorLayout<2> > matrix_ref(matrix_data, cutlass::make_Coord(N, 1)); for (int m = 0; m < M; ++m) { for (int n = 0; n < N; ++n) { matrix_ref.at(cutlass::make_Coord(m, n)) = m * N + n; } } for (int m = 0; m < M; ++m) { for (int n = 0; n < N; ++n) { EXPECT_EQ(matrix_data[m * N + n], int(m * N + n)); } } } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(TensorRef, rank2_column_major) { int const M = 8; int const N = 8; int matrix_data[M * N]; cutlass::TensorRef<int, cutlass::layout::ColumnMajor> ref(matrix_data, M); for (int m = 0; m < M; ++m) { for (int n = 0; n < N; ++n) { ref.at(cutlass::make_Coord(m, n)) = m * N + n; } } for (int m = 0; m < M; ++m) { for (int n = 0; n < N; ++n) { EXPECT_EQ(matrix_data[m + n * M], int(m * N + n)); } } } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(TensorRef, rank2_row_major) { int const M = 8; int const N = 16; int matrix_data[M * N] = { 0 }; cutlass::TensorRef<int, cutlass::layout::RowMajor> ref(matrix_data, N); for (int m = 0; m < M; ++m) { for (int n = 0; n < N; ++n) { ref.at(cutlass::make_Coord(m, n)) = m * N + n; } } for (int m = 0; m < M; ++m) { for (int n = 0; n < N; ++n) { EXPECT_EQ(matrix_data[m * N + n], int(m * N + n)); } } } //////////////////////////////////////////////////////////////////////////////////////////////////// 
TEST(TensorRef, rank2_contiguous_dynamic) { int const M = 8; int const N = 16; typedef cutlass::TensorRef<int, cutlass::layout::ContiguousMatrix> ContiguousTensorRef; cutlass::layout::Matrix layouts[] = { cutlass::layout::Matrix::kColumnMajor, cutlass::layout::Matrix::kRowMajor }; for (int i = 0; i < 2; ++i) { int matrix_data[M * N] = { 0 }; int row_stride; int col_stride; if (layouts[i] == cutlass::layout::Matrix::kColumnMajor) { row_stride = 1; col_stride = M; } else { row_stride = N; col_stride = 1; } // Use helper to determine stride vector from leading dimension ContiguousTensorRef ref( matrix_data, cutlass::layout::ContiguousMatrix::packed(cutlass::make_Coord(M, N), layouts[i])); for (int m = 0; m < M; ++m) { for (int n = 0; n < N; ++n) { ref.at(cutlass::make_Coord(m, n)) = m * N + n; } } for (int m = 0; m < M; ++m) { for (int n = 0; n < N; ++n) { EXPECT_EQ(matrix_data[m * row_stride + n * col_stride], int(m * N + n)); } } } } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(TensorRef, rank2_column_major_interleaved) { int const M = 16; int const N = 16; int const kInterleave = 4; int matrix_data[M * N] = {0}; // Define the Layout for a column-major interleaved matrix format using Layout = cutlass::layout::ColumnMajorInterleaved<kInterleave>; // Construct a TensorRef cutlass::TensorRef< int, Layout> ref(matrix_data, Layout::packed(cutlass::make_Coord(M, N))); for (int m = 0; m < M; ++m) { for (int n = 0; n < N; ++n) { ref.at(cutlass::make_Coord(m, n)) = m + n * M; } } // Verify for (int m = 0; m < M; ++m) { for (int n = 0; n < N; n += kInterleave) { for (int i = 0; i < kInterleave; ++i) { EXPECT_EQ(matrix_data[m * kInterleave + n * M + i], int(m + (n + i) * M)); } } } } //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(TensorRef, rank2_row_major_interleaved) { int const M = 16; int const N = 16; int const kInterleave = 4; int matrix_data[M * N] = {0}; // Define the Layout for a row-major interleaved matrix format using Layout = cutlass::layout::RowMajorInterleaved<kInterleave>; // Construct a TensorRef cutlass::TensorRef< int, Layout> ref(matrix_data, Layout::packed(cutlass::make_Coord(M, N))); for (int m = 0; m < M; ++m) { for (int n = 0; n < N; ++n) { ref.at(cutlass::make_Coord(m, n)) = m + n * M; } } // Verify for (int m = 0; m < M; m += kInterleave) { for (int n = 0; n < N; ++n) { for (int i = 0; i < kInterleave; ++i) { EXPECT_EQ(matrix_data[m * N + i + n * kInterleave], int((m + i) + n * M)); } } } } ////////////////////////////////////////////////////////////////////////////////////////////////////
test/unit/core/tensor_ref.cu/0
{ "file_path": "test/unit/core/tensor_ref.cu", "repo_id": "test", "token_count": 2450 }
48
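For reference, the interleaved checks above amount to a closed-form offset computation. The helper below is an illustrative sketch, not part of the test file, that spells out the ColumnMajorInterleaved<4> mapping which the rank2_column_major_interleaved test verifies element by element: columns are grouped into panels of four, and within a panel each row contributes four contiguous elements.

// Sketch only: offset of logical element (m, n) in an M-row ColumnMajorInterleaved<4> matrix.
// This reproduces the index expression matrix_data[m * kInterleave + n * M + i] used above,
// with n the panel-aligned column and i = column % 4.
inline long long column_major_interleaved4_offset(int m, int n, int M) {
  int const kInterleave = 4;
  return static_cast<long long>(n / kInterleave) * M * kInterleave  // panel of 4 columns
       + static_cast<long long>(m) * kInterleave                    // row within the panel
       + (n % kInterleave);                                         // column within the group
}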
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass_unit_test.h" #include <iostream> #include <iomanip> #include <utility> #include <type_traits> #include <vector> #include <numeric> #include <cute/container/bit_field.hpp> #include <cute/algorithm/tuple_algorithms.hpp> using namespace cute; TEST(CuTe_core, Bitfield) { for_each(make_int_range<1,65>{}, [&](auto NumBits) { constexpr auto num_bits = cute::remove_cvref_t<decltype(NumBits)>::value; for_each(make_int_range<0, 129>{}, [&](auto BitStart) { constexpr auto bit_start = cute::remove_cvref_t<decltype(BitStart)>::value; using BF = bit_field<bit_start, cute::remove_cvref_t<decltype(NumBits)>::value>; #if 0 printf("bit_field<%d,%d>:\n", bit_start, num_bits); printf(" value_type_bits : %d\n", BF::value_type_bits); printf(" storage_type_bits: %d\n", BF::storage_type_bits); printf(" N : %d\n", BF::N); printf(" idx : %d\n", BF::idx); printf(" bit_lo : %d\n", BF::bit_lo); printf(" bit_hi : %d\n", BF::bit_hi); printf(" mask : 0x%lx\n", uint64_t(BF::mask)); printf(" mask_lo : 0x%lx\n", uint64_t(BF::mask_lo)); printf(" mask_hi : 0x%lx\n", uint64_t(BF::mask_hi)); #endif // Test uint64_t v = num_bits == 64 ? uint64_t(-1) : ((uint64_t(1) << NumBits) - 1); BF bf{}; bf = v; EXPECT_EQ(v, uint64_t(bf)); }); }); for_each(make_int_range<0,129>{}, [&](auto BitStart) { using BF = bit_field<cute::remove_cvref_t<decltype(BitStart)>::value, 32, float>; BF bf{}; bf = 3.14f; EXPECT_EQ(3.14f, float(bf)); }); }
test/unit/cute/core/bitfield.cpp/0
{ "file_path": "test/unit/cute/core/bitfield.cpp", "repo_id": "test", "token_count": 1290 }
49
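A small usage sketch, not part of the test above: it exercises a single cute::bit_field placed at a bit offset that straddles a 32-bit word boundary. Assignment and conversion round-trip the stored value, which is exactly the property the exhaustive loop above checks for every (bit_start, num_bits) pair; the particular choice of bits [28, 48) is arbitrary.

// Sketch only: one bit_field instance mirroring the round-trip property tested above.
#include <cstdint>
#include <cute/container/bit_field.hpp>

inline bool bit_field_round_trip() {
  // 20-bit unsigned field starting at bit 28 (crosses the first 32-bit word boundary).
  cute::bit_field<28, 20> field{};
  uint64_t value = (uint64_t(1) << 20) - 1;   // largest 20-bit value
  field = value;                              // masked write into the backing storage
  return uint64_t(field) == value;            // expected to hold, as in the test loop
}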
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass_unit_test.h" #include "../hopper/tma_store_testbed.hpp" using namespace cute; using namespace cutlass::test; #if CUDA_12_0_SM90_FEATURES_SUPPORTED template <class T, class TmaType = T, class GMEM_Layout, class SMEM_Layout, class CTA_Tile> void test_tma_store(GMEM_Layout const& gmem_layout, SMEM_Layout const& smem_layout, CTA_Tile const& cta_tile) { using namespace cute; return test_tma_store<T, TmaType>(SM90_TMA_STORE{}, gmem_layout, smem_layout, cta_tile); } template <class T, class TmaType = T, class GMEM_Layout, class SMEM_Layout> void test_tma_store(GMEM_Layout const& gmem_layout, SMEM_Layout const& smem_layout) { using namespace cute; return test_tma_store<T, TmaType>(gmem_layout, smem_layout, product_each(shape(smem_layout))); } TEST(SM90_CuTe_Hopper, Tma_Load_1D) { Layout smem_layout = Layout<_256, _1>{}; { Layout gmem_layout = smem_layout; test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } { Layout gmem_layout = make_layout(128, GenColMajor{}); test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } } TEST(SM90_CuTe_Hopper, Tma_Store_32x32_Col) { Layout smem_layout = Layout<Shape<_32,_32>, Stride<_1,_32>>{}; { Layout gmem_layout = smem_layout; test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } { Layout gmem_layout = make_layout(make_shape(32,32), 
GenColMajor{}); test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } { Layout gmem_layout = make_layout(make_shape(32,32), make_stride(Int<1>{}, 1024)); test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } } TEST(SM90_CuTe_Hopper, Tma_Store_32x32_Row) { Layout smem_layout = Layout<Shape<_32,_32>, Stride<_32,_1>>{}; { Layout gmem_layout = smem_layout; test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } { Layout gmem_layout = make_layout(make_shape(32,32), GenRowMajor{}); test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } { Layout gmem_layout = make_layout(make_shape(32,32), make_stride(1024, Int<1>{})); test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } } template <class T, template <typename> typename SWIZZLE_ATOM> void test_tma_store_swizzle_atom_mn() { auto smem_layout = SWIZZLE_ATOM<T>{}; Layout gmem_layout = make_layout(make_shape(2*size<0>(smem_layout), 2*size<1>(smem_layout)), GenColMajor{}); return test_tma_store<T>(gmem_layout, smem_layout); } template <class T, template <typename> typename SWIZZLE_ATOM> void test_tma_store_swizzle_atom_k() { auto smem_layout = SWIZZLE_ATOM<T>{}; Layout gmem_layout = make_layout(make_shape(2*size<0>(smem_layout), 2*size<1>(smem_layout)), GenRowMajor{}); return test_tma_store<T>(gmem_layout, smem_layout); } TEST(SM90_CuTe_Hopper, Tma_Store_Swizzle_Atoms) { test_tma_store_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW128_Atom>(); test_tma_store_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW128_Atom>(); test_tma_store_swizzle_atom_mn< float, GMMA::Layout_MN_SW128_Atom>(); test_tma_store_swizzle_atom_mn<double, GMMA::Layout_MN_SW128_Atom>(); test_tma_store_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW64_Atom>(); test_tma_store_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW64_Atom>(); test_tma_store_swizzle_atom_mn< float, GMMA::Layout_MN_SW64_Atom>(); test_tma_store_swizzle_atom_mn<double, GMMA::Layout_MN_SW64_Atom>(); test_tma_store_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW32_Atom>(); test_tma_store_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW32_Atom>(); test_tma_store_swizzle_atom_mn< float, GMMA::Layout_MN_SW32_Atom>(); test_tma_store_swizzle_atom_mn<double, GMMA::Layout_MN_SW32_Atom>(); test_tma_store_swizzle_atom_mn<int8_t, GMMA::Layout_MN_INTER_Atom>(); test_tma_store_swizzle_atom_mn<half_t, GMMA::Layout_MN_INTER_Atom>(); test_tma_store_swizzle_atom_mn< float, GMMA::Layout_MN_INTER_Atom>(); test_tma_store_swizzle_atom_mn<double, GMMA::Layout_MN_INTER_Atom>(); test_tma_store_swizzle_atom_k<int8_t, GMMA::Layout_K_SW128_Atom>(); test_tma_store_swizzle_atom_k<half_t, GMMA::Layout_K_SW128_Atom>(); test_tma_store_swizzle_atom_k< float, GMMA::Layout_K_SW128_Atom>(); test_tma_store_swizzle_atom_k<double, GMMA::Layout_K_SW128_Atom>(); test_tma_store_swizzle_atom_k<int8_t, GMMA::Layout_K_SW64_Atom>(); 
test_tma_store_swizzle_atom_k<half_t, GMMA::Layout_K_SW64_Atom>(); test_tma_store_swizzle_atom_k< float, GMMA::Layout_K_SW64_Atom>(); test_tma_store_swizzle_atom_k<double, GMMA::Layout_K_SW64_Atom>(); test_tma_store_swizzle_atom_k<int8_t, GMMA::Layout_K_SW32_Atom>(); test_tma_store_swizzle_atom_k<half_t, GMMA::Layout_K_SW32_Atom>(); test_tma_store_swizzle_atom_k< float, GMMA::Layout_K_SW32_Atom>(); test_tma_store_swizzle_atom_k<double, GMMA::Layout_K_SW32_Atom>(); test_tma_store_swizzle_atom_k<int8_t, GMMA::Layout_K_INTER_Atom>(); test_tma_store_swizzle_atom_k<half_t, GMMA::Layout_K_INTER_Atom>(); test_tma_store_swizzle_atom_k< float, GMMA::Layout_K_INTER_Atom>(); test_tma_store_swizzle_atom_k<double, GMMA::Layout_K_INTER_Atom>(); } template <class T, template <typename> typename SWIZZLE_ATOM> void test_tma_store_swizzle_tile_mn() { auto smem_layout = tile_to_shape(SWIZZLE_ATOM<T>{}, Shape<_128,_128>{}); Layout gmem_layout = make_layout(make_shape(2*size<0>(smem_layout), 2*size<1>(smem_layout)), GenColMajor{}); return test_tma_store<T>(gmem_layout, smem_layout); } template <class T, template <typename> typename SWIZZLE_ATOM> void test_tma_store_swizzle_tile_k() { auto smem_layout = tile_to_shape(SWIZZLE_ATOM<T>{}, Shape<_128,_128>{}); Layout gmem_layout = make_layout(make_shape(2*size<0>(smem_layout), 2*size<1>(smem_layout)), GenRowMajor{}); return test_tma_store<T>(gmem_layout, smem_layout); } TEST(SM90_CuTe_Hopper, Tma_Store_Swizzle_Tiles) { // Other T-types use too much smem test_tma_store_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW128_Atom>(); test_tma_store_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW128_Atom>(); test_tma_store_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW64_Atom>(); test_tma_store_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW64_Atom>(); test_tma_store_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW32_Atom>(); test_tma_store_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW32_Atom>(); test_tma_store_swizzle_tile_mn<int8_t, GMMA::Layout_MN_INTER_Atom>(); test_tma_store_swizzle_tile_mn<half_t, GMMA::Layout_MN_INTER_Atom>(); test_tma_store_swizzle_tile_k<int8_t, GMMA::Layout_K_SW128_Atom>(); test_tma_store_swizzle_tile_k<half_t, GMMA::Layout_K_SW128_Atom>(); test_tma_store_swizzle_tile_k<int8_t, GMMA::Layout_K_SW64_Atom>(); test_tma_store_swizzle_tile_k<half_t, GMMA::Layout_K_SW64_Atom>(); test_tma_store_swizzle_tile_k<int8_t, GMMA::Layout_K_SW32_Atom>(); test_tma_store_swizzle_tile_k<half_t, GMMA::Layout_K_SW32_Atom>(); test_tma_store_swizzle_tile_k<int8_t, GMMA::Layout_K_INTER_Atom>(); test_tma_store_swizzle_tile_k<half_t, GMMA::Layout_K_INTER_Atom>(); } // Tensor by-mode TEST(SM90_CuTe_Hopper, Tma_Store_Tensor) { // 3-mode TMA { Layout gmem_layout = make_layout(make_shape(128, 64, 5)); auto cta_tile = Shape<_64, _32>{}; // GMEM Tiling: // Take 64-elem from m // Take 32-elem from k auto smem_layout = make_layout(Shape<_64,_32>{}); test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile); } // 4-mode TMA { Layout gmem_layout = make_layout(make_shape(make_shape(80,40),make_shape(32,12))); auto cta_tile = Shape<Shape<_16,_8>,Shape<_32,_2>>{}; // GMEM Tiling: // Take 16-elem from m0, 8-elem from m1, // Take 32-elem from k0, 2-elem from k1 auto smem_layout = make_layout(Shape<_128,_64>{}); test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile); } // 5-mode TMA { Layout gmem_layout = make_layout(make_shape(make_shape(32,32,32),make_shape(32,12))); auto cta_tile = Shape<Shape<_16,_4,_2>,Shape<_16,_2>>{}; // GMEM Tiling: // Take 4-elem from m0, 4-elem from m1, 5-elem from m2 // Take 32-elem 
from k0, 2-elem from k1 auto smem_layout = make_layout(Shape<_128,_32>{}); test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile); } } // Tensor Multimode -- TMA with more than 5 modes in GMEM (packs residual modes into last TMA mode) TEST(SM90_CuTe_Hopper, Tma_Store_Tensor_Multimode) { { Layout gmem_layout = make_layout(make_shape(make_shape(32,3,2,2),make_shape(32,4,2))); auto cta_tile = Shape<Shape<_32>, Shape<_32,_2>>{}; // GMEM Tiling: // Take 32-elem from m0 // Take 32-elem from k0, 2-elem from k1 auto smem_layout = make_layout(Shape<_32,_64>{}); test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile); } { Layout gmem_layout = make_layout(make_shape(make_shape(64,3,2,2),make_shape(32,4,2))); auto cta_tile = Shape<Shape<_32,_3>, Shape<_32,_2>>{}; // GMEM Tiling: // Take 32-elem from m0, 3-elem from m1 // Take 32-elem from k0, 2-elem from k1 auto smem_layout = make_layout(Shape<_96,_64>{}); test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile); } { Layout gmem_layout = make_layout(make_shape(make_shape(64,3,2,3,2),make_shape(32,4,2,2))); auto cta_tile = Shape<Shape<_32>, Shape<_16,_2>>{}; // GMEM Tiling: // Take 32-elem from m0 // Take 16-elem from k0, 2-elem from k1 auto smem_layout = make_layout(Shape<_32,_32>{}); test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile); } } #endif
test/unit/cute/hopper/tma_store.cu/0
{ "file_path": "test/unit/cute/hopper/tma_store.cu", "repo_id": "test", "token_count": 5975 }
50
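A hypothetical additional case, not in the original file: it would live in the same translation unit and reuse the test_tma_store<> helpers defined above. The 64x64 row-major tile and the GMEM stride of 2048 are arbitrary choices made here to show the same pattern as the 32x32 cases, where the SMEM tile maps onto the top-left corner of a larger row-major GMEM tensor.

// Sketch only: follows the structure of the Tma_Store_32x32_Row test above.
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
TEST(SM90_CuTe_Hopper, Tma_Store_64x64_Row_Sketch)
{
  Layout smem_layout = Layout<Shape<_64,_64>, Stride<_64,_1>>{};
  {
    // GMEM exactly matches the SMEM tile.
    Layout gmem_layout = smem_layout;
    test_tma_store<half_t>(gmem_layout, smem_layout);
  }
  {
    // GMEM is a larger row-major matrix (leading dimension 2048 elements).
    Layout gmem_layout = make_layout(make_shape(64,64), make_stride(2048, Int<1>{}));
    test_tma_store<half_t>(gmem_layout, smem_layout);
  }
}
#endif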
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_clamp.h" #include "cutlass/gemm/warp/default_mma_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "testbed.h" ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_64x64_64x64x32) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::int4b_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 32 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_64x64_32x32x32) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::int4b_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 32 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = 
testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_128x128_64x64x32) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::int4b_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_128x64_64x32x32) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::int4b_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 32 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_64x128_32x64x32) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::int4b_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; using Element = ElementOutput; using LayoutA = 
cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_32x128_32x64x32) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::int4b_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<32, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_128x32_64x32x32) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::int4b_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 32 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 32, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // 
Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_256x128_64x64x32) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::int4b_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<256, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s4_tensor_op_128x256_64x64x32) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::int4b_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 32 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 256, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_64x64_64x64x16) { // // Define the warp-level matrix multiply // using ElementOutput = int8_t; using ElementAccumulator = int; using ElementCompute = 
float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_64x64_32x32x16) { // // Define the warp-level matrix multiply // using ElementOutput = int8_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_128x128_64x64x16) { // // Define the warp-level matrix multiply // using ElementOutput = int8_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, 
InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_64x128_64x64x16) { // // Define the warp-level matrix multiply // using ElementOutput = int8_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_128x64_64x32x16) { // // Define the warp-level matrix multiply // using ElementOutput = int8_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } 
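// Note (added commentary, not part of the original tests): in every test in this file,
// kElementsPerAccess is the number of output elements written per aligned vector access,
// i.e. the access width in bits divided by the width of ElementOutput. Two compile-time
// checks of that arithmetic for the integer output types exercised above:
static_assert(32 / cutlass::sizeof_bits<cutlass::int4b_t>::value == 8,
              "a 32-bit access holds 8 int4b_t output elements");
static_assert(128 / cutlass::sizeof_bits<int8_t>::value == 16,
              "a 128-bit access holds 16 int8_t output elements");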
TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_64x128_32x64x16) { // // Define the warp-level matrix multiply // using ElementOutput = int8_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_32x128_32x64x16) { // // Define the warp-level matrix multiply // using ElementOutput = int8_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, s8_tensor_op_128x32_64x32x16) { // // Define the warp-level matrix multiply // using ElementOutput = int8_t; using ElementAccumulator = int; using ElementCompute = float; int const kElementsPerAccess = 64 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using Element = ElementOutput; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 
64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAddSaturate>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_64x64_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_128x128_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = 
cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_128x256_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_256x128_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<256, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); 
EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_32x32_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_64x64_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_64x128_32x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const 
kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, tensor_op_128x64_64x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Mixed precision tests // ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_64x64_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using 
LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_128x128_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_128x256_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, 
InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_256x128_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<256, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_32x32_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, 
WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_64x64_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, mixed_f16_f32_tensor_op_64x128_32x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, 
mixed_f16_f32_tensor_op_128x64_64x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// // // F16 accumulation // ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_64x64_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_128x128_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; int const kElementsPerAccess = 128 
/ cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_128x256_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_256x128_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<256, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = 
cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_32x32_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_64x64_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, 
InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_64x128_32x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, f16_tensor_op_128x64_64x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename 
cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_Epilogue_threadblock_epilogue, f64_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using ElementOutput = double; using ElementAccumulator = double; using ElementCompute = double; int const kElementsPerAccess = 1; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = double; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_Epilogue_threadblock_epilogue, f64_tensor_op_128x64_64x32x4) { // // Define the warp-level matrix multiply // using ElementOutput = double; using ElementAccumulator = double; using ElementCompute = double; int const kElementsPerAccess = 1; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = double; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_Epilogue_threadblock_epilogue, f64_tensor_op_64x128_32x64x4) { // // Define the warp-level matrix multiply // using ElementOutput = double; using ElementAccumulator = double; using ElementCompute = double; int const kElementsPerAccess = 1; int const kPartitionsK = 1; using Shape = 
cutlass::gemm::GemmShape<64, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = double; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_Epilogue_threadblock_epilogue, f64_tensor_op_128x128_32x64x4) { // // Define the warp-level matrix multiply // using ElementOutput = double; using ElementAccumulator = double; using ElementCompute = double; int const kElementsPerAccess = 1; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = double; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, vec1_mixed_f16_f32_tensor_op_128x128_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 1; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, 
ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, vec1_mixed_f16_f32_tensor_op_128x256_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 1; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM75_Epilogue_threadblock_epilogue, vec1_tensor_op_128x128_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 1; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } 
///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM75_Epilogue_threadblock_epilogue, vec1_tensor_op_128x256_64x64x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 1; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<128, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous< cutlass::sizeof_bits<Element>::value, 64>; using LayoutC = cutlass::layout::RowMajor; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, LayoutC>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } /////////////////////////////////////////////////////////////////////////////////////////////////
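// [Editor's sketch -- not part of the original test file.] Every test above composes the
// same four pieces: a warp-level tensor-op MMA, a LinearCombination output operator,
// DefaultEpilogueTensorOp, and the EpilogueTestbed harness used by this file. The helper
// template below only illustrates that shared structure; its name and parameter list are
// assumptions introduced for illustration, not CUTLASS API.
template <
  typename ElementOutput,
  typename ElementAccumulator,
  typename ElementAB,
  typename Shape,
  typename WarpShape,
  typename InstructionShape,
  typename LayoutA,
  typename LayoutB,
  int kElementsPerAccess,
  int kPartitionsK = 1
>
void run_default_tensor_op_epilogue_test() {

  using ElementCompute = ElementAccumulator;
  using LayoutC = cutlass::layout::RowMajor;

  // Warp-level matrix multiply (same construction as the tests above)
  using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
      WarpShape, InstructionShape,
      ElementAB, LayoutA,
      ElementAB, LayoutB,
      ElementAccumulator, LayoutC>::Type;

  // Output operator
  using OutputOp = cutlass::epilogue::thread::LinearCombination<
      ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute>;

  // Threadblock-scoped epilogue
  using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
      Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess>::Epilogue;

  // Exercise the epilogue through the testbed defined for this file
  EpilogueTestbed<Epilogue> testbed;
  EXPECT_TRUE(testbed.run_all());
}

/////////////////////////////////////////////////////////////////////////////////////////////////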
test/unit/epilogue/threadblock/epilogue_tensor_op.cu/0
{ "file_path": "test/unit/epilogue/threadblock/epilogue_tensor_op.cu", "repo_id": "test", "token_count": 30192 }
51
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide GEMM interface with stream-K scheduling */ #include <iostream> #include "cutlass/cutlass.h" #include "cute/tensor.hpp" #include "cute/atom/mma_atom.hpp" #include "cutlass/numeric_types.h" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/gemm_universal.hpp" #include "cutlass/gemm/kernel/tile_scheduler.hpp" #include "cutlass/gemm/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" #include "cutlass/epilogue/collective/default_epilogue.hpp" #include "cutlass/epilogue/thread/linear_combination.h" #include "../../common/cutlass_unit_test.h" #include "gemm_testbed_3x.hpp" #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) using namespace cute; TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x1x1) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_1,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 8, ElementB, LayoutB, 8, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_1x2x1) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_1,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 8, ElementB, LayoutB, 8, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, 
cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// /////////////////////////////// Cluster 2x2x1 //////////////////////////////// /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_2x2x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x2x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_2x2x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using 
LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x2x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_1,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// /////////////////////////////// Cluster 4x1x1 //////////////////////////////// /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_4,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = 
typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_4,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_4,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); 
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_4,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// /////////////////////////////// Cluster 1x4x1 //////////////////////////////// /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_1,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_1,_4,_1>; using CollectiveMainloop = typename 
cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_1,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_1,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto 
>::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// /////////////////////////////// Cluster 2x4x1 //////////////////////////////// /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// 
TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 256x128x64_2x2x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::TmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveMainloop = typename 
cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } TEST(SM90_Device_Gemm_f16t_f16n_f16t_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 256x128x64_2x2x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::TmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 128x128x64_2x2x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::epilogue::TmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); 
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 128x128x64_2x2x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::epilogue::TmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 256x128x64_2x2x1_BiasF32_ReLU) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, float>; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, EpilogueSchedule, FusionOperation >::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>(1.0, 1.0)); } #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
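// [Editor's sketch -- not part of the original test file.] All of the tests above delegate
// argument construction to test::gemm::device::TestAll<>. For reference, a Gemm built with
// cutlass::gemm::StreamKScheduler is driven through the usual GemmUniversalAdapter flow;
// the pointers, strides, extents, and scalars named below (ptr_A, stride_A, M, N, K, alpha,
// beta, ...) are caller-supplied placeholders, and the exact Arguments layout should be
// checked against the CUTLASS 3.x headers for the chosen collectives.
//
//   Gemm gemm;
//   typename Gemm::Arguments args {
//     cutlass::gemm::GemmUniversalMode::kGemm,
//     {M, N, K, /*batch*/ 1},                               // problem shape
//     {ptr_A, stride_A, ptr_B, stride_B},                   // mainloop (A/B) arguments
//     {{alpha, beta}, ptr_C, stride_C, ptr_D, stride_D}     // epilogue (C/D) arguments
//   };
//   size_t workspace_bytes = Gemm::get_workspace_size(args); // stream-K requires workspace
//   cutlass::device_memory::allocation<uint8_t> workspace(workspace_bytes);
//   if (gemm.can_implement(args) == cutlass::Status::kSuccess) {
//     gemm.initialize(args, workspace.get());
//     gemm.run();
//   }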
test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cooperative_stream_k.cu/0
{ "file_path": "test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cooperative_stream_k.cu", "repo_id": "test", "token_count": 15914 }
52
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for grouped Rank2K problem visitors */ #pragma once #include <iostream> #include <numeric> #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/rank_2k_grouped_problem_visitor.h" #include "cutlass/util/device_memory.h" #include "cutlass/device_kernel.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// // Use simple problem visitor as a baseline template <typename ProblemSizeHelper, typename ThreadblockShape, int PrefetchTileCount, int ThreadCount, cutlass::FillMode FillModeC> struct BaselineProblemVisitor : public cutlass::gemm::kernel::BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape> { using Base = cutlass::gemm::kernel::BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape>; using Params = typename Base::Params; static int const kThreadCount = ThreadCount; static cutlass::FillMode const kFillModeC = FillModeC; struct SharedStorage {}; int32_t tile_count_sum; SharedStorage &shared_storage; // // Methods // CUTLASS_DEVICE BaselineProblemVisitor( Params const &params_, SharedStorage &shared_storage_, int32_t block_idx ): Base(params_, block_idx), shared_storage(shared_storage_) { cutlass::gemm::GemmCoord problem = this->problem_size(); cutlass::gemm::GemmCoord grid = this->grid_shape(problem); tile_count_sum = this->tile_count(grid); } CUTLASS_DEVICE bool next_tile() { if (this->tile_idx < tile_count_sum) { return true; } do { ++this->problem_idx; if (this->problem_idx >= this->params.problem_count) { return false; } cutlass::gemm::GemmCoord problem = this->problem_size(); cutlass::gemm::GemmCoord grid = this->grid_shape(problem); this->problem_tile_start = tile_count_sum; tile_count_sum += this->tile_count(grid); } while (tile_count_sum <= this->tile_idx); return true; } static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count, int32_t block_count) { return 0; } static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count, int32_t block_count, void* host_workspace_ptr) {} CUTLASS_DEVICE cutlass::gemm::GemmCoord threadblock_offset(int32_t threadblock_id) const { int32_t macro_id = threadblock_id / ProblemSizeHelper::OffsetHelper::kThreadblockSkewRatio; int32_t macro_row = ceil(cutlass::fast_sqrt((2*macro_id) + 2.25) - 0.5) - 1; int32_t macro_col = macro_id - (((macro_row+1) * macro_row)/2); if (FillModeC == cutlass::FillMode::kUpper) { cutlass::swap(macro_row, macro_col); } int32_t row = ProblemSizeHelper::OffsetHelper::macro_row_to_row(macro_row, threadblock_id); int32_t col = ProblemSizeHelper::OffsetHelper::macro_col_to_col(macro_col, threadblock_id); return cutlass::gemm::GemmCoord(row, col, 0); } }; template <typename ProblemVisitor> struct ProblemVisitorKernel { struct SharedStorage { typename ProblemVisitor::SharedStorage problem_visitor; }; struct Params { typename ProblemVisitor::Params problem_visitor_params; int32_t* visited_problems_ptr; int32_t* visited_tiles_ptr; int32_t visits_per_block; Params(): visited_problems_ptr(nullptr), visited_tiles_ptr(nullptr), visits_per_block(0) {} Params(typename ProblemVisitor::Params problem_visitor_params_, int32_t* visited_problems_ptr_, int32_t* visited_tiles_ptr_, int32_t visits_per_block_): 
problem_visitor_params(problem_visitor_params_), visited_problems_ptr(visited_problems_ptr_), visited_tiles_ptr(visited_tiles_ptr_), visits_per_block(visits_per_block_) {} }; CUTLASS_DEVICE void operator()(const Params& params, SharedStorage &shared_storage) { int32_t store_offset = params.visits_per_block * blockIdx.x; ProblemVisitor problem_visitor(params.problem_visitor_params, shared_storage.problem_visitor, blockIdx.x); while (problem_visitor.next_tile()) { cutlass::gemm::GemmCoord problem_size = problem_visitor.problem_size(); int32_t problem_idx = problem_visitor.problem_index(); int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx()); cutlass::gemm::GemmCoord grid_shape = problem_visitor.grid_shape(problem_size); cutlass::gemm::GemmCoord tile_offset = problem_visitor.threadblock_offset(threadblock_idx); problem_visitor.advance(gridDim.x); // // Early exit conditions // 1) Out of range // 2) Upper-triangular block in lower-triangular problem // 3) Lower-triangular block in upper-triangular problem // if (grid_shape.m() <= tile_offset.m() || grid_shape.n() <= tile_offset.n()) { continue; } if (ProblemVisitor::kFillModeC == cutlass::FillMode::kLower && (tile_offset.m() + 1) * ProblemVisitor::ThreadblockShape::kM <= tile_offset.n() * ProblemVisitor::ThreadblockShape::kN) { continue; } if (ProblemVisitor::kFillModeC == cutlass::FillMode::kUpper && tile_offset.m() * ProblemVisitor::ThreadblockShape::kM >= (tile_offset.n() + 1) * ProblemVisitor::ThreadblockShape::kN) { continue; } if (threadIdx.x == 0) { params.visited_problems_ptr[store_offset] = problem_idx; params.visited_tiles_ptr[store_offset] = threadblock_idx; ++store_offset; } } } }; template <typename ProblemVisitor> struct ProblemVisitorRunner { using BaseKernel = ProblemVisitorKernel<ProblemVisitor>; using Params = typename BaseKernel::Params; Params params; std::vector<cutlass::gemm::GemmCoord> host_problem_sizes; int32_t problem_count; int32_t threadblock_count; int32_t visits_per_block; cutlass::DeviceAllocation<int32_t> visited_problems; cutlass::DeviceAllocation<int32_t> visited_tiles; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> device_problem_sizes; cutlass::DeviceAllocation<uint8_t> workspace; std::vector<int32_t> host_visited_problems; std::vector<int32_t> host_visited_tiles; ProblemVisitorRunner(const std::vector<cutlass::gemm::GemmCoord>& host_problem_sizes_, int32_t threadblock_count_): host_problem_sizes(host_problem_sizes_), problem_count(int32_t(host_problem_sizes_.size())), threadblock_count(threadblock_count_) {} /// Initializes GEMM state from arguments. 
cutlass::Status initialize() { size_t workspace_bytes = ProblemVisitor::get_workspace_size( host_problem_sizes.data(), problem_count, threadblock_count); workspace.reset(workspace_bytes); std::vector<uint8_t> host_workspace(workspace_bytes); int32_t tile_count = ProblemVisitor::group_tile_count(host_problem_sizes.data(), problem_count); ProblemVisitor::host_precompute(host_problem_sizes.data(), problem_count, threadblock_count, host_workspace.data()); workspace.copy_from_host(host_workspace.data(), workspace_bytes); device_problem_sizes.reset(problem_count); device_problem_sizes.copy_from_host(host_problem_sizes.data(), problem_count); visits_per_block = (tile_count - 1 + threadblock_count) / threadblock_count; int32_t total_visits = visits_per_block * threadblock_count; visited_problems.reset(total_visits); visited_tiles.reset(total_visits); host_visited_problems.resize(total_visits); host_visited_tiles.resize(total_visits); cudaError_t result = cudaMemset(visited_problems.get(), -1, sizeof(int32_t) * total_visits); if (result != cudaSuccess) { return cutlass::Status::kErrorInternal; } result = cudaMemset(visited_tiles.get(), -1, sizeof(int32_t) * total_visits); if (result != cudaSuccess) { return cutlass::Status::kErrorInternal; } typename ProblemVisitor::Params pv_params(device_problem_sizes.get(), problem_count, workspace.get(), tile_count); params = Params(pv_params, visited_problems.get(), visited_tiles.get(), visits_per_block); return cutlass::Status::kSuccess; } bool verify() { // Sort by problem size and then by threadblock_idx std::vector<int32_t> indices(host_visited_problems.size()); std::iota(indices.begin(), indices.end(), 0); std::stable_sort(indices.begin(), indices.end(), [&](int32_t i1, int32_t i2) { if (host_visited_problems[i1] == host_visited_problems[i2]) { return host_visited_tiles[i1] < host_visited_tiles[i2]; } return host_visited_problems[i1] < host_visited_problems[i2]; }); int32_t idx = 0; // Skip any entries that were not visited while (host_visited_problems[indices[idx]] == -1) { ++idx; } // Check that each problem visited has the tiles we expect for (int32_t problem_idx = 0; problem_idx < problem_count; ++problem_idx) { auto problem = host_problem_sizes[problem_idx]; ProblemVisitor::possibly_transpose_problem(problem); int32_t problem_tiles = ProblemVisitor::tile_count(ProblemVisitor::grid_shape(problem)); for (int i = 0; i < problem_tiles; ++i) { EXPECT_EQ(problem_idx, host_visited_problems[indices[idx]]); EXPECT_EQ(i, host_visited_tiles[indices[idx]]); ++idx; } } return true; } bool run(bool skip_tile_check=false, cudaStream_t stream = nullptr) { cutlass::Status status = initialize(); if (status != cutlass::Status::kSuccess) { std::cerr << "Initialization failed" << std::endl; return false; } dim3 grid(threadblock_count, 1, 1); dim3 block(ProblemVisitor::kThreadCount, 1, 1); int smem_size = int(sizeof(typename BaseKernel::SharedStorage)); cutlass::Kernel<BaseKernel><<<grid, block, smem_size, stream>>>(params); cudaError_t result = cudaGetLastError(); if (result != cudaSuccess) { std::cerr << "grid launch failed with error " << cudaGetErrorString(result) << std::endl; return false; } result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cerr << "cudaDeviceSynchronize failed with error " << cudaGetErrorString(result) << std::endl; return false; } visited_problems.copy_to_host(host_visited_problems.data()); visited_tiles.copy_to_host(host_visited_tiles.data()); if (skip_tile_check) { return true; } return verify(); } }; template <typename 
ThreadblockShape, int PrefetchTileCount, int ThreadCount, cutlass::FillMode FillModeC, cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode0, cutlass::gemm::kernel::GroupScheduleMode... Args> struct TestbedGroupedRank2KScheduler { using BaselinePV = BaselineProblemVisitor<cutlass::gemm::kernel::detail::Rank2KGroupedProblemSizeHelper<ThreadblockShape>, ThreadblockShape, PrefetchTileCount, ThreadCount, FillModeC>; // // Data members // // Whether to skip checking that the tiles are visited as expected. This is useful // in cases where ThreadblockShape::kM != ThreadblockShape::kN, for which the grouped // Rank2K scheduler may assign out-of-bounds tiles that will cause a threadblock to // exit early, but which are difficult to detect in tests without reimplementing // this functionality. bool skip_tile_check; uint32_t seed; int problem_count; int threadblock_count; std::vector<cutlass::gemm::GemmCoord> problem_sizes_host; // // Methods // TestbedGroupedRank2KScheduler(bool skip_tile_check_=false, uint32_t seed_ = 3080): skip_tile_check(skip_tile_check_), seed(seed_) { srand(seed); } /// Initializes data structures void initialize(int32_t scale_factor) { // // Choose random problem sizes // problem_sizes_host.clear(); problem_sizes_host.resize(problem_count); for (int32_t i = 0; i < problem_count; ++i) { int n = scale_factor * (rand() % 64) + 24; cutlass::gemm::GemmCoord problem( n, n, scale_factor * (rand() % 64) + 24); problem_sizes_host.at(i) = problem; } } template <cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode_> void compare_visitors(const ProblemVisitorRunner<BaselinePV>& baseline_runner) { using PV = cutlass::gemm::kernel::Rank2KGroupedProblemVisitor< ThreadblockShape, GroupScheduleMode_, PrefetchTileCount, ThreadCount, FillModeC>; ProblemVisitorRunner<PV> runner(problem_sizes_host, threadblock_count); EXPECT_TRUE(runner.run(skip_tile_check)); // Check that this problem visitor visits the same problems and tiles as the baseline EXPECT_EQ(baseline_runner.host_visited_problems, runner.host_visited_problems); EXPECT_EQ(baseline_runner.host_visited_tiles, runner.host_visited_tiles); } template <cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode1_, cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode2_, cutlass::gemm::kernel::GroupScheduleMode... Rest> void compare_visitors(const ProblemVisitorRunner<BaselinePV>& baseline_runner) { // Compare the next visitor with the baseline visitor compare_visitors<GroupScheduleMode1_>(baseline_runner); // Recurse to compare the next visitors compare_visitors<GroupScheduleMode2_, Rest...>(baseline_runner); } /// Executes the test on all scheduler modes void run(int problem_count, int threadblock_count, int scale_factor=8) { this->problem_count = problem_count; this->threadblock_count = threadblock_count; // Initialize the problem initialize(scale_factor); // Run the baseline visitor to which we will compare all other visitors ProblemVisitorRunner<BaselinePV> baseline_runner(problem_sizes_host, threadblock_count); EXPECT_TRUE(baseline_runner.run(skip_tile_check)); compare_visitors<Args...>(baseline_runner); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // device } // gemm } // test /////////////////////////////////////////////////////////////////////////////////////////////////
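// Illustrative usage sketch (an editorial addition, not part of the original header):
// a unit test would instantiate TestbedGroupedRank2KScheduler with a threadblock tile,
// prefetch tile count, thread count, fill mode, and the GroupScheduleModes to exercise;
// the trailing GroupScheduleMode arguments are the visitors compared against the
// baseline. The test name and all numeric values below are assumptions chosen for
// illustration only.
//
//   TEST(SM80_Device_Rank2KGrouped_scheduler, 64x64x16_lower) {
//     test::gemm::device::TestbedGroupedRank2KScheduler<
//         cutlass::gemm::GemmShape<64, 64, 16>,                    // ThreadblockShape
//         128,                                                     // PrefetchTileCount
//         128,                                                     // ThreadCount
//         cutlass::FillMode::kLower,                               // FillModeC
//         cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly,
//         cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute
//     > testbed;
//
//     testbed.run(/*problem_count=*/27, /*threadblock_count=*/108);
//   }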
test/unit/gemm/device/testbed_grouped_rank_2k_scheduler.h/0
{ "file_path": "test/unit/gemm/device/testbed_grouped_rank_2k_scheduler.h", "repo_id": "test", "token_count": 6618 }
53
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide TRMM interface */ #include <iostream> #include "../../common/cutlass_unit_test.h" #include "cutlass/blas3.h" #include "cutlass/gemm/device/trmm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/trmm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "testbed_trmm_universal.h" #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) ////////////////////////////////////////////Test name////////////////////////////////////////////////// // // SM80_Device_Trmm_{ElementA}{LayoutA}_{ElementB}{LayoutB}_{ElementC}{LayoutC}_{SideMode}_{FillMode}\ // _{DiagType}_tensor_op_{ElementAccumulator}_align{AlignmentA}_align{AlignmentB} // /////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_l_un_tensor_op_f32_align1_align1, 64x64x32_32x32x32) { using ElementOutput = float; using ElementAccumulator = float; using Trmm = cutlass::gemm::device::Trmm< float, cutlass::layout::RowMajor, cutlass::SideMode::kLeft, cutlass::FillMode::kLower, cutlass::DiagType::kUnit, float, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<32, 32, 32>, cutlass::gemm::GemmShape<16, 8, 8>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 1, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4, 1, 1, false, cutlass::arch::OpMultiplyAdd >; EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_l_un_tensor_op_f32_align1_align4, 64x64x32_32x32x32) { using ElementOutput = float; using ElementAccumulator = float; using Trmm = cutlass::gemm::device::Trmm< float, cutlass::layout::RowMajor, cutlass::SideMode::kLeft, cutlass::FillMode::kLower, cutlass::DiagType::kUnit, float, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<32, 32, 32>, cutlass::gemm::GemmShape<16, 8, 8>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4, 1, 4, false, cutlass::arch::OpMultiplyAdd >; EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_l_un_tensor_op_f32_align1_align4, 256x128x16_64x64x16) { using ElementOutput = float; using ElementAccumulator = float; using Trmm = cutlass::gemm::device::Trmm< float, cutlass::layout::RowMajor, cutlass::SideMode::kLeft, cutlass::FillMode::kLower, cutlass::DiagType::kUnit, float, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<256, 128, 16>, cutlass::gemm::GemmShape<64, 64, 
16>, cutlass::gemm::GemmShape<16, 8, 8>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 1, 4, false, cutlass::arch::OpMultiplyAdd >; EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_l_un_tensor_op_f32_align1_align4, 128x256x32_64x64x32) { using ElementOutput = float; using ElementAccumulator = float; using Trmm = cutlass::gemm::device::Trmm< float, cutlass::layout::RowMajor, cutlass::SideMode::kLeft, cutlass::FillMode::kLower, cutlass::DiagType::kUnit, float, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<128, 256, 32>, cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 8>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 1, 4, false, cutlass::arch::OpMultiplyAdd >; EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_l_nu_tensor_op_f32_align1_align4, 256x128x32_64x64x32) { using ElementOutput = float; using ElementAccumulator = float; using Trmm = cutlass::gemm::device::Trmm< float, cutlass::layout::RowMajor, cutlass::SideMode::kLeft, cutlass::FillMode::kLower, cutlass::DiagType::kNonUnit, float, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<256, 128, 32>, cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 8>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 1, 4, false, cutlass::arch::OpMultiplyAdd >; EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_u_nu_tensor_op_f32_align1_align4, 64x64x16_32x32x16) { using ElementOutput = float; using ElementAccumulator = float; using Trmm = cutlass::gemm::device::Trmm< float, cutlass::layout::RowMajor, cutlass::SideMode::kLeft, cutlass::FillMode::kUpper, cutlass::DiagType::kNonUnit, float, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<64, 64, 16>, cutlass::gemm::GemmShape<32, 32, 16>, cutlass::gemm::GemmShape<16, 8, 8>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 10, 1, 4, false, cutlass::arch::OpMultiplyAdd >; EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// 
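// For example, per the naming convention documented at the top of this file, the test
// directly above decodes as: ElementA = tf32 in RowMajor ('t'), ElementB = tf32 in
// ColumnMajor ('n'), ElementC = f32 in RowMajor ('t'), SideMode = Left ('ls'),
// FillMode = Upper ('u'), DiagType = NonUnit ('nu'), tensor-op math with an f32
// accumulator, AlignmentA = 1 and AlignmentB = 4. The second part of the test name,
// 64x64x16_32x32x16, gives the threadblock tile followed by the warp tile. The C++
// element type is float, which the SM80 tensor-op path converts to tf32 internally,
// hence 'tf32' in the name.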
TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_u_un_tensor_op_f32_align1_align4, 128x128x16_64x64x16) { using ElementOutput = float; using ElementAccumulator = float; using Trmm = cutlass::gemm::device::Trmm< float, cutlass::layout::RowMajor, cutlass::SideMode::kLeft, cutlass::FillMode::kUpper, cutlass::DiagType::kUnit, float, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<128, 128, 16>, cutlass::gemm::GemmShape<64, 64, 16>, cutlass::gemm::GemmShape<16, 8, 8>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4, 1, 4, false, cutlass::arch::OpMultiplyAdd >; EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// // This test fails on Ada when running with 11.8 #if ((__CUDACC_VER_MAJOR__ != 11) || (__CUDACC_VER_MINOR__ != 8) || (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ == 890))) TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_u_nu_tensor_op_f32_align1_align4, 256x128x16_128x64x16) { using ElementOutput = float; using ElementAccumulator = float; using Trmm = cutlass::gemm::device::Trmm< float, cutlass::layout::RowMajor, cutlass::SideMode::kLeft, cutlass::FillMode::kUpper, cutlass::DiagType::kNonUnit, float, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<256, 128, 16>, cutlass::gemm::GemmShape<128, 64, 16>, cutlass::gemm::GemmShape<16, 8, 8>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4, 1, 4, false, cutlass::arch::OpMultiplyAdd >; EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>()); } #endif ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_u_nu_tensor_op_f32_align1_align4, 128x256x16_64x128x16) { using ElementOutput = float; using ElementAccumulator = float; using Trmm = cutlass::gemm::device::Trmm< float, cutlass::layout::RowMajor, cutlass::SideMode::kLeft, cutlass::FillMode::kUpper, cutlass::DiagType::kNonUnit, float, cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<128, 256, 16>, cutlass::gemm::GemmShape<64, 128, 16>, cutlass::gemm::GemmShape<16, 8, 8>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 1, 4, false, cutlass::arch::OpMultiplyAdd >; EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_Device_Trmm_tf32t_tf32n_f32t_ls_u_nu_tensor_op_f32_align1_align4, 256x256x16_64x128x16) { using ElementOutput = float; using ElementAccumulator = float; using Trmm = cutlass::gemm::device::Trmm< float, cutlass::layout::RowMajor, cutlass::SideMode::kLeft, cutlass::FillMode::kUpper, cutlass::DiagType::kNonUnit, float, 
cutlass::layout::ColumnMajor, ElementOutput, cutlass::layout::RowMajor, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<256, 256, 16>, cutlass::gemm::GemmShape<64, 128, 16>, cutlass::gemm::GemmShape<16, 8, 8>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 3, 1, 4, false, cutlass::arch::OpMultiplyAdd >; EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// #endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
test/unit/gemm/device/trmm_tf32t_tf32n_f32t_tensor_op_f32_ls_sm80.cu/0
{ "file_path": "test/unit/gemm/device/trmm_tf32t_tf32n_f32t_tensor_op_f32_ls_sm80.cu", "repo_id": "test", "token_count": 5533 }
54
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for threadblock-level GEMM */ #include "mma_multistage_sparse_testbed.h" #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED) //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_64x64x64_64x64x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 512); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_64x64x64_32x32x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 512); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_128x64x64_64x32x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 64, 512); using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } 
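// Note on the launch configurations used throughout this file: each test launches the
// threadblock-scoped testbed with blockDim = (32, warp_count, 1), where warp_count =
// (ThreadblockShape::kM / WarpShape::kM) * (ThreadblockShape::kN / WarpShape::kN).
// For example, the 128x64x64 threadblock tile with a 64x32x64 warp tile directly above
// uses (128/64) * (64/32) = 4 warps, hence block(32, 4, 1); the multi-CTA tests simply
// add more threadblocks via gridDim (e.g. grid(2, 2)).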
//////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_64x128x64_32x64x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 128, 512); using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_128x32x64_32x32x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 32, 512); using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_32x256x128_32x64x128_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(32, 256, 512); using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 128>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_128x16x64_32x16x64_16x8x32_4stage) { using 
ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 16, 512); using ThreadblockShape = cutlass::gemm::GemmShape<128, 16, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_128x128x64_64x64x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 128, 512); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, multicta_256x256x768_128x128x64_64x64x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(256, 256, 768); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, multicta_512x256x768_256x128x64_64x64x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; 
using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(512, 256, 768); using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x64_64x64x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 512); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x64_32x32x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 512); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x64x64_64x32x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 64, 512); using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; 
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x128x64_32x64x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 128, 512); using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x128x64_64x64x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 128, 512); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, multicta_256x256x768_128x128x64_64x64x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(256, 256, 768); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const 
Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, multicta_512x256x768_256x128x64_64x64x64_16x8x32_4stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(512, 256, 768); using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_64x64x128_64x64x128_16x8x32_3stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 512); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_128x64x128_64x32x128_16x8x32_3stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 64, 512); using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, 
InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_64x128x128_32x64x128_16x8x32_3stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 128, 512); using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_128x128x128_64x32x128_16x8x32_3stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 128, 512); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, multicta_256x256x768_128x128x128_64x32x128_16x8x32_3stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(256, 256, 768); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 8, 
1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x128_64x64x128_16x8x32_3stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 512); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x128_32x32x128_16x8x32_3stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 512); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x64x128_64x32x128_16x8x32_3stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 64, 512); using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } 
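// Note: these tests exercise the SM80 sparse tensor core MMA path, which assumes 2:4
// structured sparsity in operand A (two non-zero elements in every group of four along
// the K dimension). The SparseTestbed is expected to hold A in compressed form together
// with the associated metadata and to check the result against a dense host reference;
// the K extents above (e.g. 512) refer to the logical, uncompressed K.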
//////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x128x128_32x64x128_16x8x32_3stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 128, 512); using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x128x128_64x32x128_16x8x32_3stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 128, 512); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, multicta_256x256x768_128x128x128_64x32x128_16x8x32_3stage) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(256, 256, 768); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 32>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, 
tensor_op_64x64x32_64x64x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 256); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_64x64x32_32x32x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 256); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_128x64x32_64x32x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 64, 256); using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_64x128x32_32x64x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = 
cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 128, 256); using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_128x128x32_64x64x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 128, 256); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, multicta_256x256x384_128x128x32_64x64x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(256, 256, 384); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, multicta_512x256x384_256x128x32_64x64x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = 
cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(512, 256, 384); using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x32_64x64x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 256); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::SparseTestbed<MmaCore>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x32_32x32x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 256); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x64x32_64x32x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 64, 256); using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>; using WarpShape = 
cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x128x32_32x64x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 128, 256); using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x128x32_64x64x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 128, 256); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, multicta_256x256x384_128x128x32_64x64x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(256, 256, 384); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int 
const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, multicta_512x256x384_256x128x32_64x64x32_16x8x16_4stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(512, 256, 384); using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_64x64x64_64x64x64_16x8x16_3stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 256); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_128x64x64_64x32x64_16x8x16_3stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 64, 256); using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, 
WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_64x128x64_32x64x64_16x8x16_3stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 128, 256); using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, tensor_op_128x128x64_64x32x64_16x8x16_3stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 128, 256); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_congruous, multicta_256x256x384_128x128x64_64x32x64_16x8x16_3stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(256, 256, 384); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 
2); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x64_64x64x64_16x8x16_3stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 256); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x64_32x32x64_16x8x16_3stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 256); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x64x64_64x32x64_16x8x16_3stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 64, 256); using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } 
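// Naming convention used by the sparse threadblock tests in this file:
//   SM80_sparse_gemm_threadblock_[congruous|crosswise],
//   [tensor_op|multicta_<problem MxNxK>]_<threadblock shape>_<warp shape>_<instruction shape>_<stages>
// "congruous" suites pair a ColumnMajor A operand with a RowMajor B operand, while
// "crosswise" suites pair a RowMajor A operand with a ColumnMajor B operand. Each test
// instantiates DefaultSparseMmaCore with the listed shapes and stage count and launches
// the threadblock-level SparseTestbed on the given grid/block configuration.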
//////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x128x64_32x64x64_16x8x16_3stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 128, 256); using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x128x64_64x32x64_16x8x16_3stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 128, 256); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, multicta_256x256x384_128x128x64_64x32x64_16x8x16_3stage) { using ElementA = cutlass::tfloat32_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::tfloat32_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = float; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(256, 256, 384); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, 
tensor_op_64x64x128_64x64x128_16x8x64_4stage) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 1024); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x128_32x32x128_16x8x64_4stage) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 1024); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 128>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x64x128_64x32x128_16x8x64_4stage) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 64, 1024); using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x128x128_32x64x128_16x8x64_4stage) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using 
LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 128, 1024); using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 128>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x128x128_64x64x128_16x8x64_4stage) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 128, 1024); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, multicta_256x256x1536_128x128x128_64x64x128_16x8x64_4stage) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(256, 256, 1536); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, multicta_512x256x1536_256x128x128_64x64x128_16x8x64_4stage) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(512, 256, 1536); using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 128>; using WarpShape = 
cutlass::gemm::GemmShape<64, 64, 128>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x256_64x64x256_16x8x64_3stage) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 1024); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x256_32x32x256_16x8x64_3stage) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 1024); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 256>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x64x256_64x32x256_16x8x64_3stage) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 64, 1024); using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 256>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 256>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename 
cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x128x256_32x64x256_16x8x64_3stage) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 128, 1024); using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 256>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 256>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x128x256_64x32x256_16x8x64_3stage) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 128, 1024); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 256>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, multicta_256x256x1536_128x128x256_64x32x256_16x8x64_3stage) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(256, 256, 1536); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 256>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 64>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; 
dim3 grid(2, 2); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x256_64x64x256_16x8x128_4stage) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 2048); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x256_32x32x256_16x8x128_4stage) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 2048); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 256>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 256>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x64x256_64x32x256_16x8x128_4stage) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 64, 2048); using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 256>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 256>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) 
.run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x128x256_32x64x256_16x8x128_4stage) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 128, 2048); using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 256>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 256>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x128x256_64x64x256_16x8x128_4stage) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 128, 2048); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, multicta_256x256x3072_128x128x256_64x64x256_16x8x128_4stage) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(256, 256, 3072); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 256>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// 
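// Across these tests the launch shape follows the warp count implied by the threadblock
// and warp tiles: block(32, warp_count, 1) with
//   warp_count = (ThreadblockShape::kM / WarpShape::kM) * (ThreadblockShape::kN / WarpShape::kN),
// e.g. a 128x128 threadblock tile with 64x64 warp tiles launches block(32, 4, 1).
// A small helper expressing this relationship (hypothetical; the tests below simply
// hard-code the dim3 values) could look like:
//
//   template <typename ThreadblockShape, typename WarpShape>
//   constexpr int warp_count() {
//     return (ThreadblockShape::kM / WarpShape::kM) *
//            (ThreadblockShape::kN / WarpShape::kN);
//   }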
TEST(SM80_sparse_gemm_threadblock_crosswise, multicta_512x256x3072_256x128x256_64x64x256_16x8x128_4stage) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(512, 256, 3072); using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 256>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 256>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; float alpha = 1.f; float beta = 0.0f; int const Stages = 4; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x512_64x64x512_16x8x128_3stage) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 2048); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 512>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x64x512_32x32x512_16x8x128_3stage) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 64, 2048); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 512>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 512>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x64x512_64x32x512_16x8x128_3stage) { using ElementA = cutlass::int4b_t; using 
LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 64, 2048); using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 512>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 512>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_64x128x512_32x64x512_16x8x128_3stage) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(64, 128, 2048); using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 512>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 512>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, tensor_op_128x128x512_64x32x512_16x8x128_3stage) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(128, 128, 2048); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 512>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 512>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// TEST(SM80_sparse_gemm_threadblock_crosswise, multicta_256x256x3072_128x128x512_64x32x512_16x8x128_3stage) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC 
= int32_t; using LayoutC = cutlass::layout::ColumnMajor; cutlass::gemm::GemmCoord problem_size(256, 256, 3072); using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 512>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 512>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 128>; float alpha = 1.f; float beta = 0.0f; int const Stages = 3; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultSparseMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, Stages>; dim3 grid(2, 2); dim3 block(32, 8, 1); test::gemm::threadblock::SparseTestbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } //////////////////////////////////////////////////////////////////////////////// #endif // #if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED)
test/unit/gemm/threadblock/mma_multistage_sparse.cu/0
{ "file_path": "test/unit/gemm/threadblock/mma_multistage_sparse.cu", "repo_id": "test", "token_count": 35810 }
55
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "cutlass/arch/wmma.h" #ifdef CUTLASS_ARCH_WMMA_SM75_ENABLED #include "mma_pipelined_testbed.h" #include "cutlass/gemm/threadblock/default_mma_core_wmma.h" /// All tests use single staged (kStages=1) mma pipeline for the gemm mainloop /// Test name format: SM[arch]_gemm_threadblock_singlestage_wmma_tensor_op_[alayout]_[blayout]_[clayout]_[atype].[threadblock_shape]_[warp_shape]_[instruction_shape] ///////////////////////////////////////////////////////////////////////// /// Integer (s8 and u8) WMMA threadblock level tests //// ///////////////////////////////////////////////////////////////////////// #if defined(CUTLASS_ARCH_INTEGER_MATRIX_MULTIPLY_ENABLED) TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_s8, 64x64x32_64x64x32_16x16x16) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::RowMajor; static const int kStages = 1; cutlass::gemm::GemmCoord problem_size(64, 64, 128); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; float alpha = 1.f; float beta = 0.0f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_s8, 64x64x64_64x64x64_16x16x16) { using ElementA = int8_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::RowMajor; static const int kStages = 1; cutlass::gemm::GemmCoord problem_size(64, 64, 128); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; float alpha = 1.f; float beta = 0.0f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_col_row_row_s8, 64x64x32_64x64x32_16x16x16) { using ElementA = int8_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::RowMajor; static const int kStages = 1; cutlass::gemm::GemmCoord problem_size(64, 64, 128); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; float alpha = 1.f; float beta = 0.0f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, 
InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_col_row_row_s8, 64x64x64_64x64x64_16x16x16) { using ElementA = int8_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = int8_t; using LayoutB = cutlass::layout::RowMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::RowMajor; static const int kStages = 1; cutlass::gemm::GemmCoord problem_size(64, 64, 128); using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; float alpha = 1.f; float beta = 0.0f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } #endif //CUTLASS_ARCH_INTEGER_MATRIX_MULTIPLY_ENABLED //////////////////////////////////////////////////////////////////////// /// SUBBYTE (s4 and b1) WMMA threadblock level tests //// /////////////////////////////////////////////////////////////////////// #if defined(CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED) TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_s4, 64x64x128_64x64x128_8x8x32) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::RowMajor; static const int kStages = 1; cutlass::gemm::GemmCoord problem_size(64, 64, 128); using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 128>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; float alpha = 1.f; float beta = 0.f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_row_col_col_s4, 64x64x64_64x64x64_8x8x32) { using ElementA = cutlass::int4b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::int4b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; static const int kStages = 1; cutlass::gemm::GemmCoord problem_size(64, 64, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; float alpha = 1.f; float beta = 0.f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, 
cutlass::arch::OpClassWmmaTensorOp, kStages>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_b1, 64x64x512_64x64x512_8x8x128) { using ElementA = cutlass::uint1b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::uint1b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::RowMajor; static const int kStages = 1; cutlass::gemm::GemmCoord problem_size(64, 64, 2048); using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 512>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; float alpha = 1.f; float beta = 0.f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages, cutlass::arch::OpXorPopc>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_row_col_col_b1, 64x64x512_64x64x512_8x8x128) { using ElementA = cutlass::uint1b_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::uint1b_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = int32_t; using LayoutC = cutlass::layout::ColumnMajor; static const int kStages = 1; cutlass::gemm::GemmCoord problem_size(64, 64, 2048); using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 512>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>; float alpha = 1.f; float beta = 0.f; // Define the MmaCore components using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages, cutlass::arch::OpXorPopc>; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block); } #endif //CUTLASS_SUBBYTE_INTEGER_MATRIX_MULTIPLY_ENABLED #endif //CUTLASS_ARCH_WMMA_SM75_ENABLED
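For reference, the same DefaultMmaCore/Testbed pattern extends beyond the integer and sub-byte types exercised above. The sketch below is an illustrative addition, not part of the original file: it assumes such a test would sit inside the same CUTLASS_ARCH_WMMA_SM75_ENABLED guard and that the single-stage DefaultMmaCore specialization accepts cutlass::half_t operands with float accumulation (as the native 16x16x16 f16 WMMA shape does on SM70 and later).

TEST(SM75_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f16, 64x64x32_64x64x32_16x16x16) {
  // Hypothetical example: same structure as the s8 tests above, using the
  // native 16x16x16 f16 WMMA shape with f32 accumulation.
  using ElementA = cutlass::half_t;
  using LayoutA = cutlass::layout::RowMajor;
  using ElementB = cutlass::half_t;
  using LayoutB = cutlass::layout::ColumnMajor;
  using ElementC = float;
  using LayoutC = cutlass::layout::RowMajor;
  static const int kStages = 1;

  cutlass::gemm::GemmCoord problem_size(64, 64, 128);

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>;

  float alpha = 1.f;
  float beta = 0.0f;

  // Define the MmaCore components (one warp per threadblock, as in the tests above)
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp,
      kStages>;

  dim3 grid(1, 1);
  dim3 block(32, 1, 1);

  test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
                                                     problem_size.k(), alpha, beta)
      .run(grid, block);
}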
test/unit/gemm/threadblock/mma_singlestage_wmma_sm75.cu/0
{ "file_path": "test/unit/gemm/threadblock/mma_singlestage_wmma_sm75.cu", "repo_id": "test", "token_count": 4876 }
56
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "cutlass/arch/wmma.h" #if defined(CUTLASS_ARCH_WMMA_SM72_ENABLED) #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/gemm/warp/default_mma_wmma_tensor_op.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/gemm.h" #include "testbed.h" /////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////// Integer wmma.mma //////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////// // TODO: SM75 should be SM72, but the compilation breaks as SM72 shows up and runs on VOLTA TEST(SM75_warp_wmma_row_col_s8, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = int8_t; using ElementB = int8_t; using ElementC = int32_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } TEST(SM75_warp_wmma_row_col_s8, 32x8x16_32x8x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; using ElementA = int8_t; using ElementB = int8_t; using ElementC = int32_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<32, 8, 16> >().run(); } TEST(SM75_warp_wmma_row_col_s8, 8x32x16_8x32x16_8x32x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; using ElementA = int8_t; using ElementB = int8_t; using ElementC = int32_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<8, 32, 16> >().run(); } TEST(SM75_warp_wmma_row_col_u8, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = uint8_t; using ElementB = uint8_t; using ElementC = int32_t; using LayoutA = 
cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } TEST(SM75_warp_wmma_row_col_u8, 32x8x16_32x8x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; using ElementA = uint8_t; using ElementB = uint8_t; using ElementC = int32_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<32, 8, 16> >().run(); } TEST(SM75_warp_wmma_row_col_u8, 8x32x16_8x32x16_8x32x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; using ElementA = uint8_t; using ElementB = uint8_t; using ElementC = int32_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<8, 32, 16> >().run(); } #endif //CUTLASS_ARCH_WMMA_SM72_ENABLED
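Each test above maps the warp shape directly onto a single native WMMA instruction. A warp tile may also span several instructions; the following sketch is hypothetical (it is not part of the original file and would belong inside the same CUTLASS_ARCH_WMMA_SM72_ENABLED guard), but it follows the file's own DefaultMmaTensorOpWmma pattern with a 32x32x16 warp tile built from 16x16x16 wmma.mma operations.

TEST(SM75_warp_wmma_row_col_s8, 32x32x16_32x32x16_16x16x16) {
  // Hypothetical example: a warp tile larger than the native WMMA shape, so the
  // warp-level operator tiles several 16x16x16 wmma.mma instructions.
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;
  using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>;
  using ElementA = int8_t;
  using ElementB = int8_t;
  using ElementC = int32_t;
  using LayoutA = cutlass::layout::RowMajor;
  using LayoutB = cutlass::layout::ColumnMajor;
  using LayoutC = cutlass::layout::RowMajor;

  using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma<
      WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB,
      ElementC, LayoutC>::Type;

  test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<32, 32, 16> >().run();
}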
test/unit/gemm/warp/wmma_sm72.cu/0
{ "file_path": "test/unit/gemm/warp/wmma_sm72.cu", "repo_id": "test", "token_count": 2610 }
57
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief CUTLASS Library handle. 
*/ #include <iostream> #include <stdexcept> #include <cstdint> #include "cutlass/library/handle.h" #include "cutlass/library/singleton.h" #include "cutlass/library/util.h" namespace cutlass { namespace library { /////////////////////////////////////////////////////////////////////////////////////////////////// /// Constructor Handle::Handle( cudaStream_t stream, size_t workspace_size ): provider_(Provider::kCUTLASS), stream_(stream), workspace_(nullptr), workspace_size_(0), scalar_pointer_mode_(ScalarPointerMode::kHost), last_operation_(nullptr) { int device_idx = -1; cudaError_t error = cudaGetDevice(&device_idx); if (error != cudaSuccess) { throw std::runtime_error("cudaGetDevice() failed"); } error = cudaGetDeviceProperties(&device_, device_idx); if (error != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProperties() failed"); } set_workspace_size(workspace_size); Singleton::get(); } /// Destructor Handle::~Handle() { if (workspace_) { if (workspace_) { cudaFree(workspace_); } workspace_ = nullptr; workspace_size_ = 0; } } /// Move constructor Handle::Handle(Handle && handle) { device_ = handle.device_; workspace_size_ = handle.workspace_size_; workspace_ = handle.workspace_; stream_ = handle.stream_; scalar_pointer_mode_ = handle.scalar_pointer_mode_; handle.workspace_ = nullptr; handle.workspace_size_ = 0; } /// Move assignment operator Handle & Handle::operator=(Handle && handle) { provider_ = handle.provider_; device_ = handle.device_; workspace_size_ = handle.workspace_size_; workspace_ = handle.workspace_; stream_ = handle.stream_; scalar_pointer_mode_ = handle.scalar_pointer_mode_; handle.workspace_ = nullptr; handle.workspace_size_ = 0; return *this; } int Handle::compute_capability() const { return device_.major * 10 + device_.minor; } /// Sets the current CUDA stream void Handle::set_stream(cudaStream_t stream) { stream_ = stream; } /// Gets the current CUDA stream cudaStream_t Handle::get_stream() const { return stream_; } /// Gets the current provider Provider Handle::get_provider() const { return provider_; } /// Sets the provider of operations void Handle::set_provider(Provider provider) { provider_ = provider; } /// Gets the device workspace size size_t Handle::get_workspace_size() const { return workspace_size_; } /// Gets a pointer to the device workspace allocation in Global Memory void *Handle::get_workspace() const { return workspace_; } /// Sets the size of device workspace, invalidating previous calls to get_device_workspace() void Handle::set_workspace_size(size_t bytes) { if (bytes != workspace_size_) { if (workspace_) { cudaFree(workspace_); } workspace_ = nullptr; workspace_size_ = bytes; if (workspace_size_) { cudaError_t error = cudaMalloc((void **)&workspace_, workspace_size_); if (error != cudaSuccess) { throw std::runtime_error("Failed to allocate workspace"); } } } if (workspace_) { cudaError_t error = cudaMemset(workspace_, 0, workspace_size_); if (error != cudaSuccess) { throw std::runtime_error("Failed to clear workspace"); } } } /// Gets the scalar pointer mode ScalarPointerMode Handle::get_scalar_pointer_mode() const { return scalar_pointer_mode_; } /// Sets the scalar pointer mode void Handle::set_scalar_pointer_mode(ScalarPointerMode mode) { scalar_pointer_mode_ = mode; } /// Gets the last operation Operation const *Handle::get_last_operation() const { return last_operation_; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Returns the maximum required alignment for each operator 
static int maximum_alignment_requirement(GemmDescription const &desc) { return std::max( std::max(desc.A.alignment, desc.B.alignment), desc.C.alignment); } /// Returns the largest alignment (in units of elements) the problem satisfies, starting from a /// given upper limit. static int gemm_problem_alignment( int M, int N, int K, NumericTypeID element_A, void const *ptr_A, int64_t lda, int64_t batch_stride_A, NumericTypeID element_B, void const *ptr_B, int64_t ldb, int64_t batch_stride_B, NumericTypeID element_C, void const * ptr_C, int64_t ldc, int64_t batch_stride_C, void const * ptr_D, int64_t ldd, int64_t batch_stride_D, int max_alignment_in_bytes = 16 ) { void const *pointers[] = { ptr_A, ptr_B, ptr_C, ptr_D }; int64_t extents[] = { M, N, K, lda, ldb, ldc, ldd, batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D }; NumericTypeID elements[] = { element_A, element_B, element_C }; for (; max_alignment_in_bytes > 0; max_alignment_in_bytes /= 2) { bool satisfied = true; // Can pointers satisfy this? for (void const *ptr : pointers) { std::uintptr_t int_ptr = reinterpret_cast<std::uintptr_t>(ptr); if (int_ptr % max_alignment_in_bytes) { satisfied = false; break; } } if (!satisfied) { continue; } // Compute the maximum alignment based on element data types int max_element_alignment = 0; for (NumericTypeID type_id : elements) { int element_alignment = max_alignment_in_bytes * 8 / library::sizeof_bits(type_id); max_element_alignment = std::max(max_element_alignment, element_alignment); } // Can the problem size and leading dimensions satisfy this? for (int64_t extent : extents) { if (extent % max_element_alignment) { satisfied = false; break; } } if (!satisfied) { continue; } // Yes return max_element_alignment; } // No alignment satisfies this problem return 0; } /// Find the best kernel in descending order of preference. static Operation const * find_gemm_operation( GemmOperationFunctionalMap::const_iterator operators_it, GemmPreferenceKey const preference_key) { auto cc_it = operators_it->second.upper_bound(preference_key); if (cc_it == operators_it->second.begin()) { return nullptr; } Operation const *operation = nullptr; // Search in descending order of compute capability do { --cc_it; // Search tile sizes in order, for now. 
for (auto const * op : cc_it->second) { GemmDescription const &desc = static_cast<GemmDescription const &>(op->description()); int min_cc = desc.tile_description.minimum_compute_capability; int max_cc = desc.tile_description.maximum_compute_capability; int op_alignment = maximum_alignment_requirement(desc); if ((min_cc <= preference_key.compute_capability) && (preference_key.compute_capability <= max_cc) && (op_alignment <= preference_key.alignment)) { operation = op; break; } } } while (!operation && cc_it != operators_it->second.begin()); return operation; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Executes a GEMM computation: D <= alpha * A*B + beta * C Status Handle::gemm( int M, /// GEMM M dimension int N, /// GEMM N dimension int K, /// GEMM K dimension NumericTypeID element_compute, /// Data type of internal accumulation NumericTypeID element_scalar, /// Data type of alpha/beta scalars void const *alpha, /// Pointer to alpha scalar NumericTypeID element_A, /// Data type of A matrix elements LayoutTypeID layout_A, /// Layout of A matrix ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices void const * ptr_A, /// Pointer to A matrix in Global Memory int64_t lda, /// Leading dimension of A matrix NumericTypeID element_B, /// Data type of B matrix elements LayoutTypeID layout_B, /// Layout of B matrix ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices void const * ptr_B, /// Pointer to B matrix in Global Memory int64_t ldb, /// Leading dimension of B matrix void const * beta, /// Pointer to beta scalar NumericTypeID element_C, /// Data type of C and D matrices void const * ptr_C, /// Pointer to C matrix int64_t ldc, /// Leading dimension of C matrix void * ptr_D, /// Pointer to D matrix int64_t ldd /// Leading dimension of D matrix ) { // // Find the operation // GemmFunctionalKey key( provider_, GemmKind::kGemm, element_compute, element_scalar, element_A, layout_A, transform_A, element_B, layout_B, transform_B, element_C, // C/D are same type and col major default LayoutTypeID::kColumnMajor, element_C, LayoutTypeID::kColumnMajor ); auto operators_it = Singleton::get().operation_table.gemm_operations.find(key); if (operators_it == Singleton::get().operation_table.gemm_operations.end()) { return cutlass::Status::kErrorNotSupported; } if (operators_it->second.empty()) { return cutlass::Status::kErrorNotSupported; } // // Compute the largest alignment restriction the kernel can satisfy. // // Maximum alignment expectation among all kernels (in units of bytes) int const kMaximumAlignmentSize = 16; int alignment = gemm_problem_alignment( M, N, K, element_A, ptr_A, lda, 0, element_B, ptr_B, ldb, 0, element_C, ptr_C, ldc, 0, ptr_D, ldd, 0, kMaximumAlignmentSize ); // // Find the best kernel in descending order of preference. 
// GemmPreferenceKey preference_key(compute_capability(), alignment); Operation const *operation = find_gemm_operation(operators_it, preference_key); if (!operation) { return cutlass::Status::kErrorNotSupported; } last_operation_ = operation; // // Configure operation // GemmConfiguration configuration{ {M, N, K}, lda, ldb, ldc, ldd, 1 }; // Query host work space size uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration); if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) { return cutlass::Status::kErrorNotSupported; } char host_workspace[kHostWorkspaceSize]; // Query device workspace size uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration); if (uint64_t(workspace_size_) < device_workspace_size_needed) { return cutlass::Status::kErrorNotSupported; } // Initialize host and device workspaces Status status = operation->initialize( &configuration, host_workspace, workspace_, stream_); if (status != cutlass::Status::kSuccess) { return status; } // Run the operator GemmArguments arguments{ ptr_A, ptr_B, ptr_C, ptr_D, alpha, beta, scalar_pointer_mode_ }; return operation->run(&arguments, host_workspace, workspace_, stream_); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Executes a GEMM computation: D <= alpha * A*B + beta * C. // // Supports batched-strided, batched array or split-K serial or split-K parallel. // Status Handle::gemm_universal( GemmUniversalMode mode, /// indicates the mode in which the kUniversal GEMM is launched int M, /// GEMM M dimension int N, /// GEMM N dimension int K, /// GEMM K dimension NumericTypeID element_compute, /// Data type of internal accumulation NumericTypeID element_scalar, /// Data type of alpha/beta scalars void const *alpha, /// Pointer to alpha scalar NumericTypeID element_A, /// Data type of A matrix elements LayoutTypeID layout_A, /// Layout of A matrix ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices void const * ptr_A, /// Pointer to A matrix in Global Memory int64_t lda, /// Leading dimension of A matrix NumericTypeID element_B, /// Data type of B matrix elements LayoutTypeID layout_B, /// Layout of B matrix ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices void const * ptr_B, /// Pointer to B matrix in Global Memory int64_t ldb, /// Leading dimension of B matrix void const * beta, /// Pointer to beta scalar NumericTypeID element_C, /// Data type of C matrix LayoutTypeID layout_C, /// Layout of D matrix void const * ptr_C, /// Pointer to C matrix int64_t ldc, /// Leading dimension of C matrix NumericTypeID element_D, /// Data type of D matrix LayoutTypeID layout_D, /// Layout of D matrix void * ptr_D, /// Pointer to D matrix int64_t ldd, /// Leading dimension of D matrix int batch_count, /// Batch count or number of split-K slices int64_t batch_stride_A, /// Batch stride of A operand int64_t batch_stride_B, /// Batch stride of B operand int64_t batch_stride_C, /// Batch stride of C operand int64_t batch_stride_D /// Batch stride of D operand ) { // // Find the operation // GemmFunctionalKey key( provider_, GemmKind::kUniversal, element_compute, element_scalar, element_A, layout_A, transform_A, element_B, layout_B, transform_B, element_C, layout_C, element_D, layout_D ); auto operators_it = Singleton::get().operation_table.gemm_operations.find(key); if (operators_it == 
Singleton::get().operation_table.gemm_operations.end()) { return cutlass::Status::kErrorNotSupported; } if (operators_it->second.empty()) { return cutlass::Status::kErrorNotSupported; } // // Compute the largest alignment restriction the kernel can satisfy. // // Maximum alignment expectation among all kernels (in units of bytes) int const kMaximumAlignmentSize = 16; void const *ptr_A_check = ptr_A; void const *ptr_B_check = ptr_B; void const *ptr_C_check = ptr_C; void * ptr_D_check = ptr_D; // Ignore alignment of pointers to pointers. We can't check this from the host, // as each batch index has its own pointer in device memory. if (mode == GemmUniversalMode::kArray) { ptr_A_check = nullptr; ptr_B_check = nullptr; ptr_C_check = nullptr; ptr_D_check = nullptr; } int alignment = gemm_problem_alignment( M, N, K, element_A, ptr_A_check, lda, 0, element_B, ptr_B_check, ldb, 0, element_C, ptr_C_check, ldc, 0, ptr_D_check, ldd, 0, kMaximumAlignmentSize ); // // Find the best kernel in descending order of preference. // GemmPreferenceKey preference_key(compute_capability(), alignment); Operation const *operation = find_gemm_operation(operators_it, preference_key); if (!operation) { return cutlass::Status::kErrorNotSupported; } last_operation_ = operation; // // Configure operation // GemmUniversalConfiguration configuration{ mode, {M, N, K}, batch_count, lda, ldb, ldc, ldd }; // Query host work space size uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration); if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) { return cutlass::Status::kErrorNotSupported; } char host_workspace[kHostWorkspaceSize]; GemmUniversalArguments arguments{ {M, N, K}, batch_count, ptr_A, ptr_B, ptr_C, ptr_D, alpha, beta, scalar_pointer_mode_, lda, ldb, ldc, ldd, batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D }; // Query device workspace size uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration, &arguments); if (uint64_t(workspace_size_) < device_workspace_size_needed) { return cutlass::Status::kErrorNotSupported; } // Initialize host and device workspaces Status status = operation->initialize( &configuration, host_workspace, workspace_, stream_); if (status != cutlass::Status::kSuccess) { return status; } // Run the operator return operation->run(&arguments, host_workspace, workspace_, stream_); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Planar complex GEMM Status Handle::gemm_planar_complex( int M, /// GEMM M dimension int N, /// GEMM N dimension int K, /// GEMM K dimension NumericTypeID element_compute, /// Data type of internal accumulation NumericTypeID element_scalar, /// Data type of alpha/beta scalars void const *alpha, /// Pointer to alpha scalar NumericTypeID element_A, /// Data type of A matrix elements LayoutTypeID layout_A, /// Layout of A matrix ComplexTransform transform_A, /// Complex transformation applied to A matrix void const * ptr_A_real, /// Pointer to real part of A matrix void const * ptr_A_imag, /// Pointer to imaginary part of A matrix int64_t lda_real, /// Leading dimension of real part of A matrix int64_t lda_imag, /// Leading dimension of imaginary part of A matrix NumericTypeID element_B, /// Data type of B matrix elements LayoutTypeID layout_B, /// Layout of B matrix ComplexTransform transform_B, /// Complex transformation applied to B matrix void const * ptr_B_real, /// Pointer to real part of B matrix void const * ptr_B_imag, /// 
Pointer to imaginary part of B matrix int64_t ldb_real, /// Leading dimension of real part of B matrix int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix void const * beta, /// Pointer to beta scalar NumericTypeID element_C, /// Data type of C and D matrix void const * ptr_C_real, /// Pointer to real part of C matrix void const * ptr_C_imag, /// Pointer to imaginary part of C matrix int64_t ldc_real, /// Leading dimension of real part of C matrix int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix void * ptr_D_real, /// Pointer to real part of D matrix void * ptr_D_imag, /// Pointer to imaginary part of D matrix int64_t ldd_real, /// Leading dimension of real part of D matrix int64_t ldd_imag, /// Leading dimension of imaginary part of D matrix int batch_count, /// Number of batched GEMMs to execute int64_t batch_stride_A_real, int64_t batch_stride_A_imag, int64_t batch_stride_B_real, int64_t batch_stride_B_imag, int64_t batch_stride_C_real, int64_t batch_stride_C_imag, int64_t batch_stride_D_real, int64_t batch_stride_D_imag ) { // // Find the operation // GemmFunctionalKey key( provider_, GemmKind::kPlanarComplex, element_compute, element_scalar, element_A, layout_A, transform_A, element_B, layout_B, transform_B, element_C, // C/D are same type LayoutTypeID::kColumnMajor, element_C, LayoutTypeID::kColumnMajor ); auto operators_it = Singleton::get().operation_table.gemm_operations.find(key); if (operators_it == Singleton::get().operation_table.gemm_operations.end()) { return cutlass::Status::kErrorNotSupported; } if (operators_it->second.empty()) { return cutlass::Status::kErrorNotSupported; } // // Compute the largest alignment restriction the kernel can satisfy. // // Maximum alignment expectation among all kernels (in units of bytes) int const kMaximumAlignmentSize = 16; int alignment = std::max( gemm_problem_alignment( M, N, K, element_A, ptr_A_real, lda_real, batch_stride_A_real, element_B, ptr_B_real, ldb_real, batch_stride_B_real, element_C, ptr_C_real, ldc_real, batch_stride_C_real, ptr_D_real, ldd_real, batch_stride_D_real, kMaximumAlignmentSize ), gemm_problem_alignment( M, N, K, element_A, ptr_A_imag, lda_imag, batch_stride_A_imag, element_B, ptr_B_imag, ldb_imag, batch_stride_B_imag, element_C, ptr_C_imag, ldc_imag, batch_stride_C_imag, ptr_D_imag, ldd_imag, batch_stride_D_imag, kMaximumAlignmentSize ) ); // // Find the best kernel in descending order of preference. 
// GemmPreferenceKey preference_key(compute_capability(), alignment); Operation const *operation = find_gemm_operation(operators_it, preference_key); if (!operation) { return cutlass::Status::kErrorNotSupported; } last_operation_ = operation; // // Configure operation // GemmPlanarComplexConfiguration configuration{ GemmUniversalMode::kBatched, {M, N, K}, batch_count, lda_real, lda_imag, ldb_real, ldb_imag, ldc_real, ldc_imag, ldd_real, ldd_imag }; // Query host work space size uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration); if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) { return cutlass::Status::kErrorNotSupported; } char host_workspace[kHostWorkspaceSize]; // Query device workspace size uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration); if (uint64_t(workspace_size_) < device_workspace_size_needed) { return cutlass::Status::kErrorNotSupported; } // Initialize host and device workspaces Status status = operation->initialize( &configuration, host_workspace, workspace_, stream_); if (status != cutlass::Status::kSuccess) { return status; } // Run the operator GemmPlanarComplexArguments arguments{ ptr_A_real, ptr_A_imag, ptr_B_real, ptr_B_imag, ptr_C_real, ptr_C_imag, ptr_D_real, ptr_D_imag, alpha, beta, scalar_pointer_mode_, batch_stride_A_real, batch_stride_A_imag, batch_stride_B_real, batch_stride_B_imag, batch_stride_C_real, batch_stride_C_imag, batch_stride_D_real, batch_stride_D_imag }; return operation->run(&arguments, host_workspace, workspace_, stream_); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Planar complex batched GEMM loading pointers from arrays in global memory Status Handle::gemm_planar_complex_array( int expected_M, /// Expected GEMM M dimension (used for sizing CUDA grid) int expected_N, /// Expected GEMM N dimension (used for sizing CUDA grid) int expected_K, /// Expected GEMM K dimension int batch_count, /// Number of independent GEMM computations to execute int const *M, /// Array containing the GEMM M dimension for each batch index int const *N, /// Array containing the GEMM N dimension for each batch index int const *K, /// Array containing the GEMM K dimension for each batch index NumericTypeID element_compute, /// Data type of internal accumulation NumericTypeID element_scalar, /// Data type of alpha/beta scalars void const *alpha, /// Pointer to alpha scalar NumericTypeID element_A, /// Data type of A matrix elements LayoutTypeID layout_A, /// Layout of A matrix ComplexTransform transform_A, /// Complex transformation applied to A matrix void const * const * ptr_A_real, /// Pointer to array containing pointers to real part of A matrices void const * const * ptr_A_imag, /// Pointer to array containing pointers to imaginary part of A matrices int64_t lda_real, /// Leading dimension of real part of A matrix int64_t lda_imag, /// Leading dimension of imaginary part of A matrix NumericTypeID element_B, /// Data type of B matrix elements LayoutTypeID layout_B, /// Layout of B matrix ComplexTransform transform_B, /// Complex transformation applied to B matrix void const * const * ptr_B_real, /// Pointer to array containing pointers to real part of B matrices void const * const * ptr_B_imag, /// Pointer to array containing pointers to imaginary part of B matrices int64_t ldb_real, /// Leading dimension of real part of B matrix int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix void const * beta, 
/// Pointer to beta scalar NumericTypeID element_C, /// Data type of C and D matrix void const * const * ptr_C_real, /// Pointer to array containing pointers to real part of C matrices void const * const * ptr_C_imag, /// Pointer to array containing pointers to imaginary part of C matrices int64_t ldc_real, /// Leading dimension of real part of C matrix int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix void * const * ptr_D_real, /// Pointer to array containing pointers to real part of D matrices void * const * ptr_D_imag, /// Pointer to array containing pointers to imaginary part of D matrices int64_t ldd_real, /// Leading dimension of real part of D matrix int64_t ldd_imag /// Leading dimension of imaginary part of D matrix ) { // // Find the operation // GemmFunctionalKey key( provider_, GemmKind::kPlanarComplexArray, element_compute, element_scalar, element_A, layout_A, transform_A, element_B, layout_B, transform_B, element_C, // C/D are same type LayoutTypeID::kColumnMajor, element_C, LayoutTypeID::kColumnMajor ); auto operators_it = Singleton::get().operation_table.gemm_operations.find(key); if (operators_it == Singleton::get().operation_table.gemm_operations.end()) { return cutlass::Status::kErrorNotSupported; } if (operators_it->second.empty()) { return cutlass::Status::kErrorNotSupported; } // // Compute the largest alignment restriction the kernel can satisfy. // // Maximum alignment expectation among all kernels (in units of bytes) int const kMaximumAlignmentSize = 16; int alignment = std::max( gemm_problem_alignment( expected_M, expected_N, expected_K, element_A, nullptr, lda_real, 0, element_B, nullptr, ldb_real, 0, element_C, nullptr, ldc_real, 0, nullptr, ldd_real, 0, kMaximumAlignmentSize ), gemm_problem_alignment( expected_M, expected_N, expected_K, element_A, nullptr, lda_imag, 0, element_B, nullptr, ldb_imag, 0, element_C, nullptr, ldc_imag, 0, nullptr, ldd_imag, 0, kMaximumAlignmentSize ) ); // // Find the best kernel in descending order of preference. 
// GemmPreferenceKey preference_key(compute_capability(), alignment); Operation const *operation = find_gemm_operation(operators_it, preference_key); if (!operation) { return cutlass::Status::kErrorNotSupported; } last_operation_ = operation; // // Configure operation // GemmPlanarComplexArrayConfiguration configuration{ {expected_M, expected_N, expected_K}, batch_count, lda_real, lda_imag, ldb_real, ldb_imag, ldc_real, ldc_imag, ldd_real, ldd_imag }; // Query host work space size uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration); if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) { return cutlass::Status::kErrorNotSupported; } char host_workspace[kHostWorkspaceSize]; // Query device workspace size uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration); if (uint64_t(workspace_size_) < device_workspace_size_needed) { return cutlass::Status::kErrorNotSupported; } // Initialize host and device workspaces Status status = operation->initialize( &configuration, host_workspace, workspace_, stream_); if (status != cutlass::Status::kSuccess) { return status; } // Run the operator GemmPlanarComplexArrayArguments arguments{ M, N, K, ptr_A_real, ptr_A_imag, ptr_B_real, ptr_B_imag, ptr_C_real, ptr_C_imag, ptr_D_real, ptr_D_imag, alpha, beta, scalar_pointer_mode_ }; return operation->run(&arguments, host_workspace, workspace_, stream_); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Finds conv operation instances with Conv::ElementC = Reduction::ElementWorkspace Operation const* find_conv_operation_for_parallel_reduction(Operation const *operation) { ConvDescription const &conv_desc = static_cast<ConvDescription const &>(operation->description()); // if the current conv operation accumulator and output data type match return operation if(conv_desc.tile_description.math_instruction.element_accumulator == conv_desc.C.element) { return operation; } // find conv operation to match conv output and reduction workspace data type ConvFunctionalKey key( library::Provider::kCUTLASS, conv_desc.conv_kind, conv_desc.A.element, conv_desc.A.layout, conv_desc.B.element, conv_desc.B.layout, conv_desc.tile_description.math_instruction.element_accumulator, conv_desc.C.layout, conv_desc.tile_description.math_instruction.element_accumulator, conv_desc.element_epilogue); // conv operation table for conv2d or conv3d auto conv_operations = (conv_desc.kind == OperationKind::kConv2d) ?
Singleton::get().operation_table.conv2d_operations : Singleton::get().operation_table.conv3d_operations; // find ConvFunctionalKey in convolution operation table auto operators_it = conv_operations.find(key); if (operators_it == conv_operations.end()) { return nullptr; } if (operators_it->second.empty()) { return nullptr; } // conv operation for same compute capability and iterator algorithm ConvPreferenceKey preference_key( conv_desc.tile_description.minimum_compute_capability, conv_desc.iterator_algorithm); auto it = operators_it->second.find(preference_key); if(it == operators_it->second.end()) { return nullptr; } // return matching conv operation (same tile sizes and instruction) for (auto op : it->second) { if (op->description().tile_description == operation->description().tile_description) { return op; } } return nullptr; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Finds gemm operation instances with Gemm::ElementC = Reduction::ElementWorkspace Operation const* find_gemm_operation_for_parallel_reduction(Operation const *operation) { GemmDescription const &gemm_desc = static_cast<GemmDescription const &>(operation->description()); // if the current gemm operation accumulator and output data type match return operation if(gemm_desc.tile_description.math_instruction.element_accumulator == gemm_desc.D.element) { return operation; } // find gemm operation to match gemm output and reduction workspace data type GemmFunctionalKey key( library::Provider::kCUTLASS, gemm_desc.gemm_kind, gemm_desc.tile_description.math_instruction.element_accumulator, gemm_desc.element_epilogue, gemm_desc.A.element, gemm_desc.A.layout, gemm_desc.transform_A, gemm_desc.B.element, gemm_desc.B.layout, gemm_desc.transform_B, gemm_desc.tile_description.math_instruction.element_accumulator, // C/D are same type LayoutTypeID::kColumnMajor, gemm_desc.tile_description.math_instruction.element_accumulator, LayoutTypeID::kColumnMajor); // gemm operation table auto gemm_operations = Singleton::get().operation_table.gemm_operations; // find GemmFunctionalKey in gemm operation table auto operators_it = gemm_operations.find(key); if (operators_it == gemm_operations.end()) { return nullptr; } if (operators_it->second.empty()) { return nullptr; } // gemm operation for same compute capability and max operand alignment int alignment = std::max( gemm_desc.A.alignment, gemm_desc.B.alignment); GemmPreferenceKey preference_key( gemm_desc.tile_description.minimum_compute_capability, alignment); auto it = operators_it->second.find(preference_key); if(it == operators_it->second.end()) { return nullptr; } // return matching gemm operation (same tile shape, stages, warp count, and instruction) for (auto op : it->second) { if (op->description().tile_description == operation->description().tile_description) { return op; } } // return nullptr if no matching gemm operation found for parallel split-k reduction return nullptr; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
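As a usage illustration for the Handle::gemm entry point defined above, the following hypothetical sketch (not part of handle.cu) shows how a caller might launch a single-precision, column-major GEMM through the library handle. The helper function name, the workspace size, and the exact enum spellings are assumptions for the example; d_A, d_B, d_C, and d_D are caller-managed device allocations of M*K, K*N, M*N, and M*N elements respectively.

#include <cuda_runtime.h>

#include "cutlass/library/handle.h"

// Minimal sketch: f32 column-major GEMM dispatched through cutlass::library::Handle.
cutlass::Status run_f32_gemm(
  int M, int N, int K,
  void const *d_A, void const *d_B, void const *d_C, void *d_D,
  cudaStream_t stream) {

  namespace lib = cutlass::library;

  // Construct a handle bound to the caller's stream with a 1 MiB device workspace.
  lib::Handle handle(stream, 1 << 20);

  float alpha = 1.0f;
  float beta  = 0.0f;   // scalars are read from the host (ScalarPointerMode::kHost)

  return handle.gemm(
    M, N, K,
    lib::NumericTypeID::kF32,                         // element_compute
    lib::NumericTypeID::kF32,                         // element_scalar
    &alpha,
    lib::NumericTypeID::kF32, lib::LayoutTypeID::kColumnMajor,
    cutlass::ComplexTransform::kNone, d_A, M,         // A operand, lda = M
    lib::NumericTypeID::kF32, lib::LayoutTypeID::kColumnMajor,
    cutlass::ComplexTransform::kNone, d_B, K,         // B operand, ldb = K
    &beta,
    lib::NumericTypeID::kF32, d_C, M,                 // C operand, ldc = M
    d_D, M);                                          // D output, ldd = M
}

If no kernel in the operation table satisfies the functional key, compute capability, and alignment derived from these arguments, the call returns Status::kErrorNotSupported rather than failing at launch time.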
tools/library/src/handle.cu/0
{ "file_path": "tools/library/src/handle.cu", "repo_id": "tools", "token_count": 13942 }
58
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief "Any sufficiently complicated C or Fortran program contains an ad-hoc, informally-specified, bug-ridden, slow implementation of half of Common Lisp." - Greenspun's Tenth Rule of Programming cutlass::profiler::ProblemSpace defines a set of data structures which represent the Cartesian product of sequences defined by integer ranges, lists of scalars, and sets of enumerated types. These permit a single invocation of the CUTLASS Profiler to iterate over a large set of problems, verify and profile various operations when they are compatible with the command line, and construct data tables of results that are convenient inputs to post processing in Excel or Pandas. By executing multiple problems per invocation, startup overheads may be amortized across many kernel launches. 
*/ #pragma once // Standard Library includes #include <string> #include <vector> #include <memory> #include <unordered_map> #include <cstdlib> // CUTLASS Utility includes #include "cutlass/util/command_line.h" // CUTLASS Library includes #include "cutlass/library/library.h" // Profiler includes #include "enumerated_types.h" namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines the argument schema struct ArgumentDescription { /// Type of argument ArgumentTypeID type; /// Prioritized array of aliases used in command line parsing std::vector<std::string> aliases; /// Description of argument std::string description; // // Methods // /// Default ctor ArgumentDescription(): type(ArgumentTypeID::kInvalid) { } /// Constructor with aliases ArgumentDescription( ArgumentTypeID type_, std::vector<std::string> const &aliases_, std::string const &description_ ): type(type_), aliases(aliases_), description(description_) { } }; /// Vector of arguments using ArgumentDescriptionVector = std::vector<ArgumentDescription>; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Base class for kernel arguments struct KernelArgument { // // Type definitions // /// Value base class struct Value { KernelArgument const *argument; bool not_null; // // Methods // Value( KernelArgument const *argument_ = nullptr, bool not_null_ = true ): argument(argument_), not_null(not_null_) { } virtual ~Value() { } virtual std::ostream &print(std::ostream &out) const =0; }; /// Abstract base class to iterate over values within arguments struct ValueIterator { /// Indicates type of kernel argument KernelArgument const *argument; /// If the iterator points to an argument that is null, it needs to be distinguished /// from end. bool null_argument; // // Methods // /// Constructs a value iterator - no methods are valid if argument_ == nullptr ValueIterator( KernelArgument const *argument_ = nullptr, bool null_argument_ = false): argument(argument_), null_argument(null_argument_) { if (!argument_->not_null()) { null_argument = true; } } virtual ~ValueIterator() { } /// Advances to next point in range virtual void operator++() = 0; /// Compares against another value iterator - must be of the same KernelArgument type virtual bool operator==(ValueIterator const &it) const = 0; /// Returns a unique_ptr<Value> object pointing to a newly created value object virtual std::unique_ptr<Value> at() const = 0; /// Gets the type of the iterator ArgumentTypeID type() const { return argument->description->type; } /// Helper to compute inequality bool operator!=(ValueIterator const &it) const { return !(*this == it); } std::ostream &print(std::ostream &out) const; }; // // Data members // /// Describes the argument ArgumentDescription const *description; /// Parent node KernelArgument *parent; /// Sequence in which the kernel argument is to be iterated over. /// Smaller means faster changing. 
-1 is don't care int ordinal; // // Methods // /// Default ctor KernelArgument( ArgumentDescription const *description_ = nullptr, KernelArgument *parent_ = nullptr, int ordinal_ = -1 ): description(description_), parent(parent_), ordinal(ordinal_) { } virtual ~KernelArgument(); /// Returns true if the kernel argument iself is empty virtual bool not_null() const =0; /// Returns a string name for debugging std::string qualified_name() const { if (description) { if (description->aliases.empty()) { return "<description_not_null_no_aliases>"; } return description->aliases.front(); } return "<description_null>"; } virtual std::unique_ptr<ValueIterator> begin() const =0; virtual std::unique_ptr<ValueIterator> end() const =0; }; using KernelArgumentVector = std::vector<std::unique_ptr<KernelArgument>>; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a scalar argument type as a string that is lexically cast to the appropriate kernel /// type. struct ScalarArgument : public KernelArgument { // // Type definitions // /// Value type struct ScalarValue : public KernelArgument::Value { std::string value; // // Methods // ScalarValue( std::string const &value_ = "", ScalarArgument const *argument = nullptr, bool not_null_ = true ); virtual std::ostream &print(std::ostream &out) const; }; using ValueCollection = std::vector<decltype(ScalarValue::value)>; /// Abstract base class to iterate over values within arguments struct ScalarValueIterator : public KernelArgument::ValueIterator { // // Data members // ValueCollection::const_iterator value_it; // // Methods // explicit ScalarValueIterator(ScalarArgument const *argument = nullptr); virtual void operator++(); virtual bool operator==(ValueIterator const &it) const; /// Gets the value pointed to virtual std::unique_ptr<KernelArgument::Value> at() const; }; // // Data members // /// Set of possible values ValueCollection values; // // Methods // /// Default ctor explicit ScalarArgument( ArgumentDescription const *description ): KernelArgument(description) { } virtual bool not_null() const { return !values.empty(); } virtual std::unique_ptr<KernelArgument::ValueIterator> begin() const; virtual std::unique_ptr<KernelArgument::ValueIterator> end() const; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Closed range supporting additive increment struct Range { // // Type definitions // enum class Mode { kSequence, kRandom, kRandomLog2, kInvalid }; struct Iterator { int64_t value; int64_t increment; Range const *range; // // Methods // Iterator( int64_t value_ = 0, int64_t increment_ = 1, Range const *range_ = nullptr ): value(value_), increment(increment_), range(range_) { } Iterator & operator++() { value += increment; return *this; } Iterator operator++(int) { Iterator self(*this); ++(*this); return self; } bool operator==(Iterator const &it) const { return value == it.value; } bool operator!=(Iterator const &it) const { return !(*this == it); } static int64_t round(int64_t value, int64_t divisible) { int64_t rem = (value % divisible); // Round either up or down if (rem > divisible / 2) { value += (divisible - rem); } else { value -= rem; } return value; } int64_t at() const { if (!range) { return value; } switch (range->mode) { case Mode::kSequence: return value; case Mode::kRandom: { double rnd = double(range->minimum) + double(std::rand()) / double(RAND_MAX) * (double(range->maximum) - double(range->minimum)); int64_t value = int64_t(rnd); 
return round(value, range->divisible); } break; case Mode::kRandomLog2: { double lg2_minimum = std::log(double(range->minimum)) / std::log(2.0); double lg2_maximum = std::log(double(range->maximum)) / std::log(2.0); double rnd = lg2_minimum + double(std::rand()) / double(RAND_MAX) * (lg2_maximum - lg2_minimum); int64_t value = int64_t(std::pow(2.0, rnd)); return round(value, range->divisible); } break; default: break; } return value; } int64_t operator*() const { return at(); } }; // // Data members // int64_t first; ///< first element in range int64_t last; ///< last element in range int64_t increment; ///< additive increment between values Mode mode; ///< mode selection enables alternative values int64_t minimum; ///< minimum value to return int64_t maximum; ///< maximum value to return int64_t divisible; ///< rounds value down to an integer multiple of this value // // Methods // /// Default constructor - range acts as a scalar Range(int64_t first_ = 0): first(first_), last(first_), increment(1), mode(Mode::kSequence), minimum(0), maximum(0), divisible(1) { } /// Range acts as a range Range( int64_t first_, int64_t last_, int64_t increment_ = 1, Mode mode_ = Mode::kSequence, int64_t minimum_ = 0, int64_t maximum_ = 0, int64_t divisible_ = 1 ): first(first_), last(last_), increment(increment_), mode(mode_), minimum(minimum_), maximum(maximum_), divisible(divisible_) { // Helpers to avoid constructing invalid ranges if (increment > 0) { if (last < first) { std::swap(last, first); } } else if (increment < 0) { if (first < last) { std::swap(last, first); } } else if (last != first) { last = first; increment = 1; } } /// Helper to construct a sequence range static Range Sequence(int64_t first_, int64_t last_, int64_t increment_ = 1) { return Range(first_, last_, increment_, Mode::kSequence); } /// Helper to construct a range that is a random distribution static Range Random(int64_t minimum_, int64_t maximum_, int64_t count_, int64_t divisible_ = 1) { return Range(1, count_, 1, Mode::kRandom, minimum_, maximum_, divisible_); } /// Helper to construct a range that is a random distribution over a log scale static Range RandomLog2(int64_t minimum_, int64_t maximum_, int64_t count_, int64_t divisible_ = 1) { return Range(1, count_, 1, Mode::kRandomLog2, minimum_, maximum_, divisible_); } /// Returns an iterator to the first element within the range Iterator begin() const { return Iterator(first, increment, this); } /// Returns an iterator to the first element *after* the range Iterator end() const { return Iterator(first + ((last - first)/increment + 1) * increment, increment, this); } }; /// Integer-valued argument - represented as a list of integer-valued ranges struct IntegerArgument : public KernelArgument { // // Type definitions // /// Value type struct IntegerValue : public KernelArgument::Value { int64_t value; // // Methods // IntegerValue( int64_t value_ = 0, IntegerArgument const *argument_ = nullptr, bool not_null_ = true ); /// Pretty printer for debugging virtual std::ostream &print(std::ostream &out) const; }; /// Collection of ranges represent the IntegerArgument's state using RangeCollection = std::vector<Range>; /// Abstract base class to iterate over values within arguments struct IntegerValueIterator : public KernelArgument::ValueIterator { // // Data members // RangeCollection::const_iterator range_it; Range::Iterator value_it; // // Methods // IntegerValueIterator(); IntegerValueIterator(IntegerArgument const *argument); virtual void operator++(); virtual bool 
operator==(ValueIterator const &it) const; /// Gets the value pointed to virtual std::unique_ptr<KernelArgument::Value> at() const; }; // // Data members // /// Set of possible values RangeCollection ranges; // // Methods // /// Default ctor IntegerArgument( ArgumentDescription const *description ): KernelArgument(description) { } virtual bool not_null() const { bool _not_null = !ranges.empty(); return _not_null; } virtual std::unique_ptr<KernelArgument::ValueIterator> begin() const; virtual std::unique_ptr<KernelArgument::ValueIterator> end() const; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure defining the data type of tensors struct TensorArgument : public KernelArgument { // // Type definitions // struct TensorDescription { /// Data type of elements library::NumericTypeID element; /// Layout definition library::LayoutTypeID layout; /// Computed extent std::vector<int> extent; /// Enables directly specifying stride value used to size tensor std::vector<int> stride; // // Methods // TensorDescription( library::NumericTypeID element_ = library::NumericTypeID::kUnknown, library::LayoutTypeID layout_ = library::LayoutTypeID::kUnknown, std::vector<int> extent_ = std::vector<int>(), std::vector<int> stride_ = std::vector<int>() ): element(element_), layout(layout_), extent(extent_), stride(stride_) {} }; using ValueCollection = std::vector<TensorDescription>; /// Value structure struct TensorValue : public KernelArgument::Value { TensorDescription desc; // // Methods // TensorValue( TensorDescription const &desc_ = TensorDescription(), TensorArgument const *argument_ = nullptr, bool not_null_ = true ); /// Pretty printer for debugging virtual std::ostream &print(std::ostream &out) const; }; /// Abstract base class to iterate over values within arguments struct TensorValueIterator : public KernelArgument::ValueIterator { // // Data members // ValueCollection::const_iterator value_it; // // Methods // explicit TensorValueIterator(TensorArgument const *argument_); virtual void operator++(); virtual bool operator==(ValueIterator const &it) const; /// Gets the value pointed to virtual std::unique_ptr<KernelArgument::Value> at() const; }; /// Set of possible values ValueCollection values; // // Methods // /// Default ctor explicit TensorArgument( ArgumentDescription const *description ): KernelArgument(description) { } virtual bool not_null() const { return !values.empty(); } virtual std::unique_ptr<KernelArgument::ValueIterator> begin() const; virtual std::unique_ptr<KernelArgument::ValueIterator> end() const; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Numeric data type struct EnumeratedTypeArgument : public KernelArgument { // // Type definitions // struct EnumeratedTypeValue : public KernelArgument::Value { /// Data type of element std::string element; // // Methods // EnumeratedTypeValue( std::string const &element_ = std::string(), EnumeratedTypeArgument const *argument_ = nullptr, bool not_null_ = true ); /// Pretty printer for debugging virtual std::ostream &print(std::ostream &out) const; }; using ValueCollection = std::vector<decltype(EnumeratedTypeValue::element)>; /// Abstract base class to iterate over values within arguments struct EnumeratedTypeValueIterator : public KernelArgument::ValueIterator { // // Data members // ValueCollection::const_iterator value_it; // // Methods // explicit EnumeratedTypeValueIterator(EnumeratedTypeArgument const *argument_ = 
nullptr); virtual void operator++(); virtual bool operator==(ValueIterator const &it) const; /// Gets the value pointed to virtual std::unique_ptr<KernelArgument::Value> at() const; }; // // Data members // ValueCollection values; // // Members // /// Default ctor explicit EnumeratedTypeArgument(ArgumentDescription const *description): KernelArgument(description) {} virtual bool not_null() const { return !values.empty(); } virtual std::unique_ptr<KernelArgument::ValueIterator> begin() const; virtual std::unique_ptr<KernelArgument::ValueIterator> end() const; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Object storing the space argument values class ProblemSpace { public: /// Tuple of arguments using Problem = std::vector<std::unique_ptr<KernelArgument::Value>>; /// Type used to iterator over things using IteratorVector = std::vector<std::unique_ptr<KernelArgument::ValueIterator>>; /// Iterates over points in the design space class Iterator { private: /// One iterator per argument IteratorVector iterators; public: // // Methods // explicit Iterator(); Iterator(ProblemSpace const &problem_space); Iterator(Iterator &&it); // Rule of three Iterator(Iterator const &) = delete; Iterator &operator=(Iterator const &it) = delete; ~Iterator() = default; /// Pre-increment - advances to next point in argument range void operator++(); /// Gets the current argument value Problem at() const; /// Moves iterator to end void move_to_end(); /// Equality operator bool operator==(Iterator const &it) const; /// Inequality operator bool operator!=(Iterator const &it) const { return !(*this == it); } /// Helper to call at() method Problem operator*() const { return at(); } /// Helper to print iterator state std::ostream & print(std::ostream &out) const; private: /// Helper for recursively constructing iterators void construct_(KernelArgument const *argument); }; public: // // Data members // KernelArgumentVector arguments; /// Map of argument names to their position within the argument vector std::unordered_map<std::string, size_t> argument_index_map; public: // // Methods // /// Default ctor ProblemSpace() = default; /// Constructs a problem space from a vector of arguments. This vector must outlive /// the ProblemSpace object, which stores pointers to objects within the /// ArgumentDescriptionVector. ProblemSpace(ArgumentDescriptionVector const &schema, CommandLine const &cmdline); Iterator begin() const; // returns an iterator to the first point in the range Iterator end() const; // returns an iterator to the first point after the range /// Returns the index of an argument by name size_t argument_index(char const *name) const; /// Gets all argument names as an ordered vector std::vector<std::string> argument_names() const; /// Returns the number of dimensions of the problem space size_t rank() const { return arguments.size(); } private: /// Helper for recursively cloning void clone_( KernelArgumentVector &kernel_args, ArgumentDescription const *arg_desc); /// Parses command line argument void parse_( KernelArgument *arg, CommandLine const &cmdline); }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to an int if it is defined. Returns true if not null. bool arg_as_int(int &int_value, KernelArgument::Value const *value_ptr); /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. 
bool arg_as_int(int64_t &int_value, KernelArgument::Value const *value_ptr); /// Lexically casts an argument to an int if it is defined. Returns true if not null. bool arg_as_int( int &int_value, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_int( int64_t &int_value, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Lexically casts an argument to a NumericTypeID if it is defined. Returns true if not null. bool arg_as_NumericTypeID(library::NumericTypeID &numeric_type, KernelArgument::Value const *value_ptr); /// Lexically casts an argument to a NumericTypeID if it is defined. Returns true if not null. bool arg_as_NumericTypeID( library::NumericTypeID &numeric_type, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Lexically casts an argument to a LayoutTypeID if it is defined. Returns true if not null. bool arg_as_LayoutTypeID(library::LayoutTypeID &layout_type, KernelArgument::Value const *value_ptr); /// Lexically casts an argument to a LayoutTypeID if it is defined. Returns true if not null. bool arg_as_LayoutTypeID( library::LayoutTypeID &layout_type, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Lexically casts an argument to an OpcodeClassID if it is defined. Returns true if not null. bool arg_as_OpcodeClassID(library::OpcodeClassID &opcode_class, KernelArgument::Value const *value_ptr); /// Lexically casts an argument to an OpcodeClassID if it is defined. Returns true if not null. bool arg_as_OpcodeClassID( library::OpcodeClassID &opcode_class, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Lexically casts an argument to a SplitKMode if it is defined. Returns true if not null. bool arg_as_SplitKModeID(library::SplitKMode &split_k_mode, KernelArgument::Value const *value_ptr); /// Lexically casts an argument to a SplitKMode if it is defined. Returns true if not null. bool arg_as_SplitKModeID( library::SplitKMode &split_k_mode, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Lexically casts an argument to a ConvModeID if it is defined. Returns true if not null. bool arg_as_ConvModeID(library::ConvModeID &conv_mode, KernelArgument::Value const *value_ptr); /// Lexically casts an argument to a ConvModeID if it is defined. Returns true if not null. bool arg_as_ConvModeID( library::ConvModeID &conv_mode, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Lexically casts an argument to an IteratorAlgorithmID if it is defined. Returns true if not null. bool arg_as_IteratorAlgorithmID(library::IteratorAlgorithmID &iterator_algorithm, KernelArgument::Value const *value_ptr); /// Lexically casts an argument to an IteratorAlgorithmID if it is defined. Returns true if not null. bool arg_as_IteratorAlgorithmID( library::IteratorAlgorithmID &iterator_algorithm, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Lexically casts an argument to a RasterOrder if it is defined. Returns true if not null. bool arg_as_RasterOrder(library::RasterOrder &raster_order, KernelArgument::Value const *value_ptr); /// Lexically casts an argument to a RasterOrder if it is defined. Returns true if not null. 
bool arg_as_RasterOrder( library::RasterOrder &raster_order, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Lexically casts an argument to a Provider if it is defined. Returns true if not null. bool arg_as_ProviderID(library::Provider &provider, KernelArgument::Value const *value_ptr); /// Lexically casts an argument to a Provider if it is defined. Returns true if not null. bool arg_as_ProviderID( library::Provider &provider, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Lexically casts an argument to a given type stored in a byte array. Returns true if not null. bool arg_as_scalar( std::vector<uint8_t> &bytes, library::NumericTypeID numeric_type, KernelArgument::Value const *value_ptr); /// Lexically casts an argument to a given type stored in a byte array. Returns true if not null. bool arg_as_scalar( std::vector<uint8_t> &bytes, library::NumericTypeID numeric_type, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Returns true if a tensor description satisfies a `tensor` value bool tensor_description_satisfies( library::TensorDescription const &tensor_desc, TensorArgument::TensorValue const *value_ptr); /// Returns true if a tensor description satisfies a `tensor` value bool tensor_description_satisfies( library::TensorDescription const &tensor_desc, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Returns true if a conv kind satisfies the value bool conv_kind_satisfies( library::ConvKind const &conv_kind, EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr); /// Returns true if a conv kind satisfies the value bool conv_kind_satisfies( library::ConvKind const &conv_kind, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Returns true if an iterator algorithm satisfies the value bool iterator_algorithm_satisfies( library::IteratorAlgorithmID const &iterator_algorithm, EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr); /// Returns true if an iterator algorithm satisfies the value bool iterator_algorithm_satisfies( library::IteratorAlgorithmID const &iterator_algorithm, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////////////////////
tools/profiler/include/cutlass/profiler/problem_space.h/0
{ "file_path": "tools/profiler/include/cutlass/profiler/problem_space.h", "repo_id": "tools", "token_count": 8773 }
59
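A minimal usage sketch for the ProblemSpace interface declared above: it enumerates every point in the space and reads one argument back as an int. The schema, the parsed command line, and the argument name "m" are illustrative assumptions, not part of the header.

#include <iostream>

#include "cutlass/util/command_line.h"
#include "cutlass/profiler/problem_space.h"

// Enumerates every point in a problem space and prints the (hypothetical) "m" argument.
void enumerate_problems(
  cutlass::profiler::ArgumentDescriptionVector const &schema,  // assumed to describe an integer argument named "m"
  cutlass::CommandLine const &cmdline) {                        // assumed to be parsed from argv

  cutlass::profiler::ProblemSpace problem_space(schema, cmdline);

  for (auto it = problem_space.begin(); it != problem_space.end(); ++it) {
    // Each point is a Problem: one KernelArgument::Value per argument, in schema order.
    cutlass::profiler::ProblemSpace::Problem problem = *it;

    int m = 0;
    if (cutlass::profiler::arg_as_int(m, "m", problem_space, problem)) {
      std::cout << "m = " << m << std::endl;
    }
  }
}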
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Contains code for debugging cutlass code */ #pragma once #include "device_dump.h" //////////////////////////////////////////////////////////////////////////////////////////////////// /****************************************************************************** * Debug and logging macros ******************************************************************************/ /** * Formats and prints the given message to stdout */ #if !defined(CUDA_LOG) #if !defined(__CUDA_ARCH__) #define CUDA_LOG(format, ...) printf(format, __VA_ARGS__) #else #define CUDA_LOG(format, ...) \ printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format, \ blockIdx.x, \ blockIdx.y, \ blockIdx.z, \ threadIdx.x, \ threadIdx.y, \ threadIdx.z, \ __VA_ARGS__); #endif #endif /** * Formats and prints the given message to stdout only if DEBUG is defined */ #if !defined(CUDA_LOG_DEBUG) #ifdef DEBUG #define CUDA_LOG_DEBUG(format, ...) CUDA_LOG(format, __VA_ARGS__) #else #define CUDA_LOG_DEBUG(format, ...) #endif #endif /** * \brief The corresponding error message is printed to \p stderr (or \p stdout in device code) * along with the supplied source context. * * \return The CUDA error. 
*/ __host__ CUTLASS_DEVICE cudaError_t cuda_perror_impl(cudaError_t error, const char* expression, const char* filename, int line) { (void)filename; (void)line; if (error) { #if !defined(__CUDA_ARCH__) fprintf( stderr, "CUDA error %d [%s, %d] in expression '%s': %s\n", error, filename, line, expression, cudaGetErrorString(error)); fflush(stderr); #else printf("CUDA error %d [%s, %d] in expression '%s'\n", error, filename, line, expression); #endif } return error; } /** * \brief Perror macro */ #ifndef CUDA_PERROR #define CUDA_PERROR(e) cuda_perror_impl((cudaError_t)(e), #e, __FILE__, __LINE__) #endif /** * \brief Perror macro with exit */ #ifndef CUDA_PERROR_EXIT #define CUDA_PERROR_EXIT(e) \ do { if (cuda_perror_impl((cudaError_t)(e), #e, __FILE__, __LINE__)) { \ exit(1); \ } } while (0) #endif /** * \brief Perror macro only if DEBUG is defined */ #ifndef CUDA_PERROR_DEBUG #ifdef DEBUG #define CUDA_PERROR_DEBUG(e) CUDA_PERROR(e) #else #define CUDA_PERROR_DEBUG(e) (e) #endif #endif //////////////////////////////////////////////////////////////////////////////////////////////////// // A small helper class to dump a type at compile time // Usage: DebugType<Class>::Class template <typename T> struct DebugType {}; template <typename T> void DebugTypeFunc(T const& t) { T::t; } // A small helper class to dump a compile time constant at compile time // Usage: DebugValue<Class::kConstant>::kConstant template <int Value> struct DebugValue {};
tools/util/include/cutlass/util/debug.h/0
{ "file_path": "tools/util/include/cutlass/util/debug.h", "repo_id": "tools", "token_count": 1956 }
60
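A small, hypothetical sketch of how the macros above are typically used: CUDA_PERROR_EXIT wraps CUDA runtime calls on the host, and CUDA_LOG prefixes device-side printf output with block/thread coordinates. The kernel and buffer size are illustrative only.

#include <cstdio>
#include <cstdlib>

#include "cutlass/util/debug.h"

// Illustrative kernel: each thread logs and writes its thread index.
__global__ void example_kernel(int *data) {
  CUDA_LOG("writing value %d\n", threadIdx.x);
  data[threadIdx.x] = threadIdx.x;
}

int main() {
  int *device_data = nullptr;

  // Exit immediately if any CUDA runtime call fails.
  CUDA_PERROR_EXIT(cudaMalloc(&device_data, 32 * sizeof(int)));

  example_kernel<<<1, 32>>>(device_data);

  CUDA_PERROR_EXIT(cudaDeviceSynchronize());
  CUDA_PERROR_EXIT(cudaFree(device_data));
  return 0;
}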
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once /*! \file \brief HostTensor contributes management for both host and device memory. HostTensor allocates host and device memory upon construction. Basic element-wise operations on host memory synchronize device memory automatically. Explicit copy operations provide abstractions for CUDA memcpy operations. Call {host, device}_{data, ref, view}() for accessing host or device memory. See cutlass/tensor_ref.h and cutlass/tensor_view.h for more details. 
*/ #include <vector> #include "cutlass/cutlass.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/fast_math.h" #include "device_memory.h" namespace cutlass { /////////////////////////////////////////////////////////////////////////////////////////////////// /// Host tensor template < /// Data type of element stored within tensor (concept: NumericType) typename Element_, /// Defines a mapping from logical coordinate to linear memory (concept: Layout) typename Layout_ > class HostTensor { public: /// Data type of individual access using Element = Element_; /// Mapping function from logical coordinate to linear memory using Layout = Layout_; /// Logical rank of tensor index space static int const kRank = Layout::kRank; /// Index type using Index = typename Layout::Index; /// Long index used for pointer offsets using LongIndex = typename Layout::LongIndex; /// Coordinate in logical tensor space using TensorCoord = typename Layout::TensorCoord; /// Layout's stride vector using Stride = typename Layout::Stride; /// Tensor reference to device memory using TensorRef = TensorRef<Element, Layout>; /// Tensor reference to constant device memory using ConstTensorRef = typename TensorRef::ConstTensorRef; /// Tensor reference to device memory using TensorView = TensorView<Element, Layout>; /// Tensor reference to constant device memory using ConstTensorView = typename TensorView::ConstTensorView; /// Reference to element in tensor using Reference = typename TensorRef::Reference; /// Constant reference to element in tensor using ConstReference = typename ConstTensorRef::Reference; private: using StorageUnit = typename platform::conditional_t<std::is_same_v<Element, bool>, uint8_t, // Avoid the std::vector<bool> specialization typename platform::conditional_t<sizeof_bits<Element>::value % 8 == 0, // Handle subbyte types Element, uint8_t>>; using StorageContainerCalculator = cutlass::detail::StorageContainerCalculator<Element, StorageUnit>; static constexpr int kContainerTypeNumBits = StorageContainerCalculator::kContainerTypeNumBits; static constexpr int kContainerTypeNumLogicalElements = StorageContainerCalculator::kContainerTypeNumLogicalElements; static constexpr int kContainerTypeNumBytes = StorageContainerCalculator::kContainerTypeNumBytes; static constexpr int kContainerTypeNumStorageUnit = StorageContainerCalculator::kContainerTypeNumStorageUnit; // // Data members // /// Extent of tensor in logical dimensions TensorCoord extent_; /// Layout object Layout layout_; /// Host-side memory allocation std::vector<StorageUnit> host_; /// Device-side memory device_memory::allocation<StorageUnit> device_; /// number of containers size_t count_to_container_storage_unit_count(size_t count) { return (count + kContainerTypeNumLogicalElements - 1) / kContainerTypeNumLogicalElements * kContainerTypeNumStorageUnit; } public: // // Device and Host Methods // /// Default constructor HostTensor() {} /// Constructs a tensor given an extent. 
Assumes a packed layout HostTensor( TensorCoord const &extent, bool device_backed = true ) { this->reset(extent, Layout::packed(extent), device_backed); } /// Constructs a tensor given an extent and layout HostTensor( TensorCoord const &extent, Layout const &layout, bool device_backed = true ) { this->reset(extent, layout, device_backed); } ~HostTensor() { } /// Clears the HostTensor allocation to size/capacity = 0 void reset() { extent_ = TensorCoord(); layout_ = Layout::packed(extent_); host_.clear(); device_.reset(); } /// Resizes internal memory allocations without affecting layout or extent void reserve( size_t count, ///< size of tensor in elements bool device_backed_ = true) { ///< if true, device memory is also allocated device_.reset(); host_.clear(); size_t count_container = count_to_container_storage_unit_count(count); host_.resize(count_container); // Allocate memory StorageUnit* device_memory = nullptr; if (device_backed_) { device_memory = device_memory::allocate<StorageUnit>(count_container); } device_.reset(device_memory, device_backed_ ? count_container : 0); } /// Updates the extent and layout of the HostTensor. Allocates memory according to the new /// extent and layout. void reset( TensorCoord const &extent, ///< extent of logical tensor Layout const &layout, ///< layout object of tensor bool device_backed_ = true) { ///< if true, device memory is also allocated. extent_ = extent; layout_ = layout; reserve(size_t(layout_.capacity(extent_)), device_backed_); } /// Updates the extent and layout of the HostTensor. Allocates memory according to the new /// extent and layout. Assumes a packed tensor configuration. void reset( TensorCoord const &extent, ///< extent of logical tensor bool device_backed_ = true) { ///< if true, device memory is also allocated. reset(extent, Layout::packed(extent), device_backed_); } /// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity. /// To force allocation, call reset(). void resize( TensorCoord const &extent, ///< extent of logical tensor Layout const &layout, ///< layout object of tensor bool device_backed_ = true) { ///< if true, device memory is also allocated. extent_ = extent; layout_ = layout; LongIndex new_size = size_t(layout_.capacity(extent_)); LongIndex new_size_container = count_to_container_storage_unit_count((layout_.capacity(extent_))); if (static_cast<decltype(host_.size())>(new_size_container) > host_.size()) { reserve(new_size, device_backed_); } } /// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity. /// To force allocation, call reset(). Note, this form of resize() assumes a packed tensor configuration. void resize( TensorCoord const &extent, ///< extent of logical tensor bool device_backed_ = true) { ///< if true, device memory is also allocated. resize(extent, Layout::packed(extent), device_backed_); } /// Returns the logical number of elements stored in the host tensor size_t size() const { return layout_.capacity(extent_); } /// Returns the logical capacity in terms of number of elements. May be larger than the size(). 
LongIndex capacity() const { return host_.size() / kContainerTypeNumStorageUnit * kContainerTypeNumLogicalElements; } /// Gets pointer to host data Element * host_data() { return reinterpret_cast<Element *>(host_.data()); } /// Gets pointer to host data with a pointer offset Element * host_data_ptr_offset(LongIndex ptr_element_offset) { return &ReferenceFactory<Element>::get(host_data(), ptr_element_offset); } /// Gets a reference to an element in host memory Reference host_data(LongIndex idx) { return ReferenceFactory<Element>::get(host_data(), idx); } /// Gets pointer to host data Element const * host_data() const { return reinterpret_cast<Element const *>(host_.data()); } /// Gets pointer to host data with a pointer offset Element const * host_data_ptr_offset(LongIndex ptr_element_offset) const { return &ReferenceFactory<Element>::get(host_data(), ptr_element_offset); } /// Gets a constant reference to an element in host memory ConstReference host_data(LongIndex idx) const { return ReferenceFactory<Element const>::get(host_data(), idx); } /// Gets pointer to device data Element * device_data() { return reinterpret_cast<Element *>(device_.get()); } /// Gets pointer to device data Element const * device_data() const { return reinterpret_cast<Element const *>(device_.get()); } /// Gets pointer to device data with a pointer offset Element * device_data_ptr_offset(LongIndex ptr_element_offset) { return &ReferenceFactory<Element>::get(device_data(), ptr_element_offset); } /// Gets pointer to device data with a pointer offset Element const * device_data_ptr_offset(LongIndex ptr_element_offset) const { return &ReferenceFactory<Element>::get(device_data(), ptr_element_offset); } /// Accesses the tensor reference pointing to data TensorRef host_ref(LongIndex ptr_element_offset=0) { return TensorRef(host_data_ptr_offset(ptr_element_offset), layout_); } /// Accesses the tensor reference pointing to data ConstTensorRef host_ref(LongIndex ptr_element_offset=0) const { return ConstTensorRef(host_data_ptr_offset(ptr_element_offset), layout_); } /// Accesses the tensor reference pointing to data TensorRef device_ref(LongIndex ptr_element_offset=0) { return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_); } /// Accesses the tensor reference pointing to data ConstTensorRef device_ref(LongIndex ptr_element_offset=0) const { return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_); } /// Accesses the tensor reference pointing to data TensorView host_view(LongIndex ptr_element_offset=0) { return TensorView(host_data_ptr_offset(ptr_element_offset), layout_, extent_); } /// Accesses the tensor reference pointing to data ConstTensorView host_view(LongIndex ptr_element_offset=0) const { return ConstTensorView(host_data_ptr_offset(ptr_element_offset), layout_, extent_); } /// Accesses the tensor reference pointing to data TensorView device_view(LongIndex ptr_element_offset=0) { return TensorView(device_data_ptr_offset(ptr_element_offset), layout_, extent_); } /// Accesses the tensor reference pointing to data ConstTensorView device_view(LongIndex ptr_element_offset=0) const { return ConstTensorView(device_data_ptr_offset(ptr_element_offset), layout_, extent_); } /// Returns true if device memory is allocated bool device_backed() const { return (device_.get() == nullptr) ? 
false : true; } /// Returns the layout object Layout & layout() { return layout_; } /// Returns the layout object Layout layout() const { return layout_; } /// Returns the layout object's stride vector Stride stride() const { return layout_.stride(); } /// Returns the layout object's stride vector Stride & stride() { return layout_.stride(); } /// Returns the layout object's stride in a given physical dimension LongIndex stride(int dim) const { return layout_.stride().at(dim); } /// Returns the layout object's stride in a given physical dimension LongIndex & stride(int dim) { return layout_.stride().at(dim); } /// Computes the offset of an index from the origin of the tensor LongIndex offset(TensorCoord const& coord) const { return layout_(coord); } /// Returns a reference to the element at the logical Coord in host memory Reference at(TensorCoord const& coord) { return host_data(offset(coord)); } /// Returns a const reference to the element at the logical Coord in host memory ConstReference at(TensorCoord const& coord) const { return host_data(offset(coord)); } /// Returns the extent of the tensor TensorCoord extent() const { return extent_; } /// Returns the extent of the tensor TensorCoord & extent() { return extent_; } /// Copies data from device to host void sync_host() { if (device_backed()) { device_memory::copy_to_host( host_.data(), device_.get(), device_.size()); } } /// Copies data from host to device void sync_device() { if (device_backed()) { device_memory::copy_to_device( device_.get(), host_.data(), host_.capacity()); } } /// Copy data from a caller-supplied device pointer into host memory. void copy_in_device_to_host( Element const* ptr_device, ///< source device memory LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } size_t container_count = count_to_container_storage_unit_count(count); device_memory::copy_to_host( host_.data(), reinterpret_cast<StorageUnit const *>(ptr_device), container_count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_in_device_to_device( Element const* ptr_device, ///< source device memory LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } size_t container_count = count_to_container_storage_unit_count(count); device_memory::copy_device_to_device( device_.get(), reinterpret_cast<StorageUnit const *>(ptr_device), container_count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_in_host_to_device( Element const* ptr_host, ///< source host memory LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } size_t container_count = count_to_container_storage_unit_count(count); device_memory::copy_to_device( device_.get(), reinterpret_cast<StorageUnit const *>(ptr_host), container_count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_in_host_to_host( Element const* ptr_host, ///< source host memory LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. 
if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } size_t container_count = count_to_container_storage_unit_count(count); device_memory::copy_host_to_host( host_.data(), reinterpret_cast<StorageUnit const *>(ptr_host), container_count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_out_device_to_host( Element * ptr_host, ///< source device memory LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } size_t container_count = count_to_container_storage_unit_count(count); device_memory::copy_to_host( reinterpret_cast<StorageUnit *>(ptr_host), device_.get(), container_count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_out_device_to_device( Element * ptr_device, ///< source device memory LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } size_t container_count = count_to_container_storage_unit_count(count); device_memory::copy_device_to_device( reinterpret_cast<StorageUnit *>(ptr_device), device_.get(), container_count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_out_host_to_device( Element * ptr_device, ///< source host memory LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } size_t container_count = count_to_container_storage_unit_count(count); device_memory::copy_to_device( reinterpret_cast<StorageUnit *>(ptr_device), host_.data(), container_count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_out_host_to_host( Element * ptr_host, ///< source host memory LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } size_t container_count = count_to_container_storage_unit_count(count); device_memory::copy_host_to_host( reinterpret_cast<StorageUnit *>(ptr_host), host_.data(), container_count); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass
tools/util/include/cutlass/util/host_tensor.h/0
{ "file_path": "tools/util/include/cutlass/util/host_tensor.h", "repo_id": "tools", "token_count": 6274 }
61
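A minimal usage sketch for HostTensor, assuming a column-major float matrix; the 128x64 extent is arbitrary. It shows the intended flow described in the header comment: element-wise writes on the host, an explicit sync_device(), then handing device_ref() to device-side code.

#include "cutlass/layout/matrix.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/util/host_tensor.h"

void host_tensor_example() {
  using Element = float;
  using Layout = cutlass::layout::ColumnMajor;

  // Allocates both host and device memory for a 128x64 matrix with a packed layout.
  cutlass::HostTensor<Element, Layout> tensor({128, 64});

  // Element-wise access on the host.
  tensor.at({0, 0}) = 1.0f;

  // Mirror host contents into the device allocation.
  tensor.sync_device();

  // device_ref() / device_data() can now be passed to a kernel or CUTLASS operator.
  cutlass::TensorRef<Element, Layout> device_ref = tensor.device_ref();
  (void)device_ref;
}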
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Reference implementation for complex-valued GEMM in device-side code. */ #pragma once #include "cutlass/blas3.h" #include "cutlass/complex.h" #include "cutlass/numeric_conversion.h" #include "cutlass/tensor_view.h" #include "cutlass/gemm/gemm.h" namespace cutlass { namespace reference { namespace device { //////////////////////////////////////////////////////////////////////////////////////////////////// namespace kernel { /// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef /// objects. /// /// Explicitly naming types needed by this template can be cumbersome, particularly for the /// accumulator type, so a function argument 'initial_accum' is exposed. Passing /// AccumulatorType(0) as the last function argument can be easier than naming all template /// arguments explicitly. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ScalarType, typename ComputeType, typename ConvertOp = NumericConverter<ElementC, ScalarType>, typename InnerProductOp = multiply_add<ComputeType>, int kMblock = 4, int kNblock = 4 > __global__ void Rank2KComplex( gemm::GemmCoord problem_size, ScalarType alpha, TensorRef<ElementA, LayoutA> tensor_a, ComplexTransform transform_a, TensorRef<ElementB, LayoutB> tensor_b, ComplexTransform transform_b, ScalarType beta, TensorRef<ElementC, LayoutC> tensor_c, TensorRef<ElementC, LayoutC> tensor_d, ComputeType initial_accum, FillMode fill_mode_c, BlasMode blas_mode, int batch_count = 1, int64_t batch_stride_A = 0, int64_t batch_stride_B = 0, int64_t batch_stride_C = 0, int64_t batch_stride_D = 0) { static_assert( LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, "Tensors must be of rank 2"); int const M = problem_size.m(); int const N = problem_size.n(); int const K = problem_size.k(); assert(M == N); ConvertOp convert_op; InnerProductOp inner_product_op; int row_block = (blockIdx.x * blockDim.x + threadIdx.x) * kMblock; int col_block = (blockIdx.y * blockDim.y + threadIdx.y) * kNblock; int batch_idx = blockIdx.z; tensor_a.add_pointer_offset(batch_idx * batch_stride_A); tensor_b.add_pointer_offset(batch_idx * batch_stride_B); tensor_c.add_pointer_offset(batch_idx * batch_stride_C); tensor_d.add_pointer_offset(batch_idx * batch_stride_D); for (; batch_idx < batch_count; batch_idx += gridDim.z) { // Compute matrix product using blocks ComputeType accum[kMblock][kNblock]; CUTLASS_PRAGMA_UNROLL for (int j = 0; j < kNblock; j++) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kMblock; i++) { accum[i][j] = initial_accum; } } for (int k_block = 0; k_block < K; ++k_block) { CUTLASS_PRAGMA_UNROLL for (int j = 0; j < kNblock; j++) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kMblock; i++) { int row = row_block + i; int col = col_block + j; if (row < M && col < N && ( (fill_mode_c == FillMode::kLower && row >= col) || (fill_mode_c == FillMode::kUpper && row <= col) ) ) { // A x B^T (Symmetric) or A x B^H (Hermitian) // complex conjugation on operandB (b_t) is function of blas3 computation ElementA a = tensor_a.at(MatrixCoord(row, k_block)); ElementB b_t = (blas_mode == BlasMode::kHermitian) ? conj(tensor_b.at(MatrixCoord(col, k_block))) : tensor_b.at(MatrixCoord(col, k_block)); ComputeType a_ik = ComputeType(a); ComputeType b_jk = ComputeType(b_t); // complex conjugation is a function of operand layouts if (transform_a == ComplexTransform::kConjugate) { a_ik = conj(a_ik); } // complex conjugation is a function of operand layouts if (transform_b == ComplexTransform::kConjugate) { b_jk = conj(b_jk); } accum[i][j] = inner_product_op(a_ik, b_jk, accum[i][j]); // B x A^T (Symmetric) or B x A^H (Hermitian) // complex conjugation on operandB (a_t) is function of blas3 computation ElementB b = tensor_b.at(MatrixCoord(row, k_block)); ElementA a_t = (blas_mode == BlasMode::kHermitian) ?
conj(tensor_a.at(MatrixCoord(col, k_block))): tensor_a.at(MatrixCoord(col, k_block)); ComputeType b_ik = ComputeType(b); ComputeType a_jk = ComputeType(a_t); // complex conjugation here is a function of operand layouts if (transform_b == ComplexTransform::kConjugate) { b_ik = conj(b_ik); } // complex conjugation here is a function of operand layouts if (transform_a == ComplexTransform::kConjugate) { a_jk = conj(a_jk); } accum[i][j] = inner_product_op(b_ik, a_jk, accum[i][j]); } } } } CUTLASS_PRAGMA_UNROLL for (int j = 0; j < kNblock; j++) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kMblock; i++) { int row = row_block + i; int col = col_block + j; MatrixCoord coord = MatrixCoord(row, col); if (row < M && col < N && ((fill_mode_c == FillMode::kLower && row >= col) || (fill_mode_c == FillMode::kUpper && row <= col)) ) { ScalarType c = tensor_c.at(coord); // The imaginary parts of the diagonal elements of // a complex data type are assumed and set to zero if (blas_mode == BlasMode::kHermitian) { c = (row == col) ? real(c) : c; } tensor_d.at(coord) = convert_op( alpha * ScalarType(accum[i][j]) + beta * c); } } } tensor_a.add_pointer_offset(batch_stride_A * gridDim.z); tensor_b.add_pointer_offset(batch_stride_B * gridDim.z); tensor_c.add_pointer_offset(batch_stride_C * gridDim.z); tensor_d.add_pointer_offset(batch_stride_D * gridDim.z); } // for (batch_idx) } } // namespace kernel //////////////////////////////////////////////////////////////////////////////////////////////////// /// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef /// objects. /// /// Explicitly naming types needed by this template can be cumbersome, particularly for the /// accumulator type, so a function argument 'initial_accum' is exposed. Passing /// AccumulatorType(0) as the last function argument can be easier than naming all template /// arguments explicitly. 
template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ScalarType, typename ComputeType, typename ConvertOp = NumericConverter<ElementC, ScalarType>, typename InnerProductOp = multiply_add<ComputeType> > void Rank2KComplex( gemm::GemmCoord problem_size, ScalarType alpha, TensorRef<ElementA, LayoutA> tensor_a, ComplexTransform transform_a, TensorRef<ElementB, LayoutB> tensor_b, ComplexTransform transform_b, ScalarType beta, TensorRef<ElementC, LayoutC> tensor_c, TensorRef<ElementC, LayoutC> tensor_d, ComputeType initial_accum, FillMode fill_mode_c, BlasMode blas_mode, int batch_count = 1, int64_t batch_stride_A = 0, int64_t batch_stride_B = 0, int64_t batch_stride_C = 0, int64_t batch_stride_D = 0) { static_assert( LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2, "Tensors must be of rank 2"); int const kMblock = 4; int const kNblock = 4; dim3 block(16, 8); dim3 grid( (problem_size.m() + block.x * kMblock - 1) / (block.x * kMblock), (problem_size.n() + block.y * kNblock - 1) / (block.y * kNblock), batch_count % std::numeric_limits<uint16_t>::max() ); kernel::Rank2KComplex< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType, ComputeType, ConvertOp, InnerProductOp, kMblock, kNblock ><<< grid, block >>>( problem_size, alpha, tensor_a, transform_a, tensor_b, transform_b, beta, tensor_c, tensor_d, initial_accum, fill_mode_c, blas_mode, batch_count, batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D ); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef /// objects. /// /// This assumes the accumulator type is the same type as the scalars. template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ScalarType > void Rank2KComplex( gemm::GemmCoord problem_size, ScalarType alpha, TensorRef<ElementA, LayoutA> tensor_a, ComplexTransform transform_a, TensorRef<ElementB, LayoutB> tensor_b, ComplexTransform transform_b, ScalarType beta, TensorRef<ElementC, LayoutC> tensor_c, TensorRef<ElementC, LayoutC> tensor_d, FillMode fill_mode_c, BlasMode blas_mode) { Rank2KComplex( problem_size, alpha, tensor_a, transform_a, tensor_b, transform_b, beta, tensor_c, tensor_d, ScalarType(0), fill_mode_c, blas_mode); } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace reference } // namespace cutlass
tools/util/include/cutlass/util/reference/device/rank_2k_complex.h/0
{ "file_path": "tools/util/include/cutlass/util/reference/device/rank_2k_complex.h", "repo_id": "tools", "token_count": 4590 }
62
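A hypothetical invocation of the device-side reference above: a complex symmetric rank-2k update on the lower triangle of C, with HostTensor managing the operands. The problem shape, scalars, and fill/blas modes are illustrative assumptions only.

#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/rank_2k_complex.h"

void rank_2k_device_reference_example() {
  using Element = cutlass::complex<float>;
  using Layout = cutlass::layout::ColumnMajor;

  int n = 64, k = 32;
  cutlass::gemm::GemmCoord problem_size(n, n, k);  // rank-2k update requires M == N

  cutlass::HostTensor<Element, Layout> tensor_a({n, k});
  cutlass::HostTensor<Element, Layout> tensor_b({n, k});
  cutlass::HostTensor<Element, Layout> tensor_c({n, n});
  cutlass::HostTensor<Element, Layout> tensor_d({n, n});

  // ... fill the host tensors and call sync_device() on each before launching ...

  cutlass::reference::device::Rank2KComplex(
    problem_size,
    Element(1.0f),                                           // alpha
    tensor_a.device_ref(), cutlass::ComplexTransform::kNone,
    tensor_b.device_ref(), cutlass::ComplexTransform::kNone,
    Element(0.0f),                                           // beta
    tensor_c.device_ref(),
    tensor_d.device_ref(),
    cutlass::FillMode::kLower,
    cutlass::BlasMode::kSymmetric);
}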
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Reference implementation for complex-valued Rank 2K update in host-side code. */ #pragma once #include "cutlass/blas3.h" #include "cutlass/complex.h" #include "cutlass/numeric_conversion.h" #include "cutlass/tensor_view.h" #include "cutlass/gemm/gemm.h" #include <assert.h> namespace cutlass { namespace reference { namespace host { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef /// objects. /// /// Explicitly naming types needed by this template can be cumbersome, particularly for the /// accumulator type, so a function argument 'initial_accum' is exposed. Passing /// AccumulatorType(0) as the last function argument can be easier than naming all template /// arguments explicitly. template < typename ElementA, typename LayoutA, typename ElementC, typename LayoutC, typename ScalarType, typename ComputeType, typename ConvertOp = NumericConverter<ElementC, ScalarType>, typename InnerProductOp = multiply_add<ComputeType> > void Rank2KComplex( gemm::GemmCoord problem_size, ScalarType alpha, TensorRef<ElementA, LayoutA> tensor_a, ComplexTransform transform_a, ScalarType beta, TensorRef<ElementC, LayoutC> tensor_c, TensorRef<ElementC, LayoutC> tensor_d, ComputeType initial_accum, FillMode fill_mode_c, BlasMode blas_mode, int batch_count = 1, int64_t batch_stride_A = 0, int64_t batch_stride_C = 0, int64_t batch_stride_D = 0) { static_assert( LayoutA::kRank == 2 && LayoutC::kRank == 2, "Tensors must be of rank 2"); // Note: batch is ignored. 
int const M = problem_size.m(); int const N = problem_size.n(); int const K = problem_size.k(); // Rank2K update operates on A=NxK, B=NxK, and C=NxN assert(M==N); // Blocking necessary to speedup reference implementation int const Mblock = 16; int const Nblock = 16; ConvertOp convert_op; InnerProductOp inner_product_op; for (int batch_idx = 0; batch_idx < batch_count; ++batch_idx) { // Compute matrix product using blocks for (int row_block = 0; row_block < M; row_block += Mblock) { for (int col_block = 0; col_block < N; col_block += Nblock) { ComputeType accum[Mblock][Nblock]; for (int j = 0; j < Nblock; j++) { for (int i = 0; i < Mblock; i++) { accum[i][j] = initial_accum; } } for (int k_block = 0; k_block < K; ++k_block) { for (int j = 0; j < Nblock; j++) { for (int i = 0; i < Mblock; i++) { int row = row_block + i; int col = col_block + j; if (row < M && col < N && ( (fill_mode_c == FillMode::kLower && row >= col) || (fill_mode_c == FillMode::kUpper && row <= col) ) ) { // A x A^T (Symmetric) or A x A^H (Hermitian) // complex conjugation on operandB (a_t) (function of blas3 computation) ElementA a = tensor_a.at(MatrixCoord(row, k_block)); ElementA a_t = (blas_mode == BlasMode::kHermitian) ? conj(tensor_a.at(MatrixCoord(col, k_block))) : tensor_a.at(MatrixCoord(col, k_block)); ComputeType a_ik = ComputeType(a); ComputeType b_jk = ComputeType(a_t); // complex conjugation (function of input layouts) if (transform_a == ComplexTransform::kConjugate) { a_ik = conj(a_ik); } // complex conjugation (function of input layouts) if (transform_a == ComplexTransform::kConjugate) { b_jk = conj(b_jk); } accum[i][j] = inner_product_op(a_ik, b_jk, accum[i][j]); } } } } for (int j = 0; j < Nblock; j++) { for (int i = 0; i < Mblock; i++) { int row = row_block + i; int col = col_block + j; MatrixCoord coord = MatrixCoord(row, col); if (row < M && col < N && ((fill_mode_c == FillMode::kLower && row >= col) || (fill_mode_c == FillMode::kUpper && row <= col)) ) { ScalarType c = tensor_c.at(coord); // The imaginary parts of the diagonal elements of // a complex data type are assumed and set to zero if (blas_mode == BlasMode::kHermitian) { c = (row == col) ? real(c) : c; } ScalarType tmp_d = convert_op( alpha * ScalarType(accum[i][j]) + beta * c); if (blas_mode == BlasMode::kHermitian && row == col ) { tensor_d.at(coord) = real(tmp_d); } else { tensor_d.at(coord) = tmp_d; } } } } } // for (col_block) } // for (row_block) tensor_a.add_pointer_offset(batch_stride_A); tensor_c.add_pointer_offset(batch_stride_C); tensor_d.add_pointer_offset(batch_stride_D); } // for (batch_idx) } //////////////////////////////////////////////////////////////////////////////////////////////////// /// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef /// objects. /// /// This assumes the accumulator type is the same type as the scalars. 
template < typename ElementA, typename LayoutA, typename ElementC, typename LayoutC, typename ScalarType > void RankKComplex( gemm::GemmCoord problem_size, ScalarType alpha, TensorRef<ElementA, LayoutA> tensor_a, ComplexTransform transform_a, ScalarType beta, TensorRef<ElementC, LayoutC> tensor_c, TensorRef<ElementC, LayoutC> tensor_d, FillMode fill_mode_c, BlasMode blas_mode) { Rank2KComplex( problem_size, alpha, tensor_a, transform_a, beta, tensor_c, tensor_d, ScalarType(0), fill_mode_c, blas_mode); } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace host } // namespace reference } // namespace cutlass
tools/util/include/cutlass/util/reference/host/rank_k_complex.h/0
{ "file_path": "tools/util/include/cutlass/util/reference/host/rank_k_complex.h", "repo_id": "tools", "token_count": 3255 }
63
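A hypothetical invocation of the host-side reference above, using the RankKComplex convenience overload to compute a Hermitian rank-k update entirely in host memory. Sizes, scalars, and modes are illustrative assumptions only.

#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/rank_k_complex.h"

void rank_k_host_reference_example() {
  using Element = cutlass::complex<double>;
  using Layout = cutlass::layout::ColumnMajor;

  int n = 32, k = 16;
  cutlass::gemm::GemmCoord problem_size(n, n, k);  // rank-k update requires M == N

  cutlass::HostTensor<Element, Layout> tensor_a({n, k});
  cutlass::HostTensor<Element, Layout> tensor_c({n, n});
  cutlass::HostTensor<Element, Layout> tensor_d({n, n});

  // ... fill tensor_a and tensor_c on the host ...

  cutlass::reference::host::RankKComplex(
    problem_size,
    Element(1.0),                                            // alpha
    tensor_a.host_ref(), cutlass::ComplexTransform::kNone,
    Element(0.0),                                            // beta
    tensor_c.host_ref(),
    tensor_d.host_ref(),
    cutlass::FillMode::kLower,
    cutlass::BlasMode::kHermitian);
}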
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Type traits for common CUDA types */ #pragma once #include <cublas_v2.h> #include <cuda_fp16.h> #include <stdint.h> #include "cutlass/numeric_types.h" #include "cutlass/complex.h" namespace cutlass { struct half_t; template <typename T> struct TypeTraits { typedef T host_type; typedef T device_type; static inline T remove_negative_zero(T x) { return x; } static inline T to_print(T x) { return x; } static inline device_type to_device(host_type x) { return x; } }; template <> struct TypeTraits<int8_t> { static cudaDataType_t const cublas_type = CUDA_R_8I; typedef int8_t host_type; typedef int8_t device_type; typedef int8_t integer_type; typedef uint8_t unsigned_type; static inline int8_t remove_negative_zero(int8_t x) { return x; } static inline int to_print(int8_t x) { return (int)x; } static inline device_type to_device(host_type x) { return x; } }; template <> struct TypeTraits<uint8_t> { static cudaDataType_t const cublas_type = CUDA_R_8I; typedef uint8_t host_type; typedef uint8_t device_type; typedef uint8_t integer_type; typedef uint8_t unsigned_type; static inline uint8_t remove_negative_zero(uint8_t x) { return x; } static inline uint32_t to_print(uint8_t x) { return (uint32_t)x; } static inline device_type to_device(host_type x) { return x; } }; template <> struct TypeTraits<int> { static cudaDataType_t const cublas_type = CUDA_R_32I; typedef int host_type; typedef int device_type; typedef int32_t integer_type; typedef uint32_t unsigned_type; static inline int32_t remove_negative_zero(int32_t x) { return x; } static inline int to_print(int x) { return x; } static inline device_type to_device(host_type x) { return x; } }; template <> struct TypeTraits<unsigned> { static cudaDataType_t const cublas_type = CUDA_R_32I; typedef unsigned host_type; typedef unsigned device_type; typedef uint32_t integer_type; typedef uint32_t unsigned_type; static inline uint32_t remove_negative_zero(uint32_t x) { return x; } static inline uint32_t to_print(uint32_t x) { return x; } static inline device_type to_device(host_type x) { return x; } }; template <> struct TypeTraits<int64_t> { static cudaDataType_t const cublas_type = CUDA_R_8I; typedef int64_t host_type; typedef int64_t device_type; typedef int64_t integer_type; typedef uint64_t unsigned_type; static inline int64_t remove_negative_zero(int64_t x) { return x; } static inline int64_t to_print(int64_t x) { return x; } static inline device_type to_device(host_type x) { return x; } }; template <> struct TypeTraits<uint64_t> { static cudaDataType_t const cublas_type = CUDA_R_8I; typedef uint64_t host_type; typedef uint64_t device_type; typedef uint64_t integer_type; typedef uint64_t unsigned_type; static inline uint64_t remove_negative_zero(uint64_t x) { return x; } static inline uint64_t to_print(uint64_t x) { return x; } static inline device_type to_device(host_type x) { return x; } }; template <> struct TypeTraits<half_t> { static cudaDataType_t const cublas_type = CUDA_R_16F; typedef half_t host_type; typedef half_t device_type; typedef int16_t integer_type; typedef uint16_t unsigned_type; static inline half_t remove_negative_zero(half_t x) { return (x.raw() == 0x8000 ? 
half_t::bitcast(0) : x); } static inline half_t to_print(half_t x) { return x; } static inline device_type to_device(half_t x) { return reinterpret_cast<device_type const &>(x); } }; template <> struct TypeTraits<float> { static cudaDataType_t const cublas_type = CUDA_R_32F; typedef float host_type; typedef float device_type; typedef int32_t integer_type; typedef uint32_t unsigned_type; static inline float remove_negative_zero(float x) { return x == -0.f ? 0.f : x; } static inline float to_print(float x) { return x; } static inline device_type to_device(host_type x) { return x; } }; template <> struct TypeTraits<double> { static cudaDataType_t const cublas_type = CUDA_R_64F; typedef double host_type; typedef double device_type; typedef int64_t integer_type; typedef uint64_t unsigned_type; static inline double remove_negative_zero(double x) { return x == -0.0 ? 0.0 : x; } static inline double to_print(double x) { return x; } static inline device_type to_device(host_type x) { return x; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // // Complex types // /////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct TypeTraits<complex<half> > { static cudaDataType_t const cublas_type = CUDA_C_16F; typedef complex<half_t> host_type; typedef complex<half> device_type; typedef int16_t integer_type; typedef uint16_t unsigned_type; static inline device_type to_device(complex<half> x) { return reinterpret_cast<device_type const &>(x); } }; template <> struct TypeTraits<complex<half_t> > { static cudaDataType_t const cublas_type = CUDA_C_16F; typedef complex<half_t> host_type; typedef complex<half> device_type; typedef int16_t integer_type; typedef uint16_t unsigned_type; static inline complex<half_t> remove_negative_zero(complex<half_t> x) { return complex<half_t>( real(x) == -0_hf ? 0_hf : real(x), imag(x) == -0_hf ? 0_hf : imag(x) ); } static inline complex<half_t> to_print(complex<half_t> x) { return x; } static inline device_type to_device(complex<half_t> x) { return reinterpret_cast<device_type const &>(x); } }; template <> struct TypeTraits<complex<float> > { static cudaDataType_t const cublas_type = CUDA_C_32F; typedef complex<float> host_type; typedef complex<float> device_type; typedef int64_t integer_type; typedef uint64_t unsigned_type; static inline complex<float> remove_negative_zero(complex<float> x) { return complex<float>( real(x) == -0.f ? 0.f : real(x), imag(x) == -0.f ? 0.f : imag(x) ); } static inline complex<float> to_print(complex<float> x) { return x; } static inline device_type to_device(complex<float> x) { return reinterpret_cast<device_type const &>(x); } }; template <> struct TypeTraits<complex<double> > { static cudaDataType_t const cublas_type = CUDA_C_64F; typedef complex<double> host_type; typedef complex<double> device_type; struct integer_type { int64_t real, imag; }; struct unsigned_type { uint64_t real, imag; }; static inline complex<double> remove_negative_zero(complex<double> x) { return complex<double>( real(x) == -0.0 ? 0.0 : real(x), imag(x) == -0.0 ? 0.0 : imag(x) ); } static inline complex<double> to_print(complex<double> x) { return x; } static inline device_type to_device(complex<double> x) { return reinterpret_cast<device_type const &>(x); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass
tools/util/include/cutlass/util/type_traits.h/0
{ "file_path": "tools/util/include/cutlass/util/type_traits.h", "repo_id": "tools", "token_count": 2954 }
64
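The TypeTraits specializations in the header above are small host-side helpers: host_type/device_type map a host representation onto the type the device code uses, remove_negative_zero() canonicalizes -0 so bitwise comparisons of host and device results do not report spurious mismatches, and to_print() widens narrow integer types so they print as numbers rather than characters. A minimal, hedged usage sketch follows; the print_sanitized() helper and the sample values are illustrative assumptions, not part of CUTLASS, and the snippet assumes the CUTLASS utility include paths (and the cuBLAS headers pulled in by type_traits.h) are available.

#include <cstdint>
#include <iostream>
#include <vector>
#include "cutlass/util/type_traits.h"

template <typename T>
void print_sanitized(std::vector<T> const &values) {
  using Traits = cutlass::TypeTraits<T>;
  for (T v : values) {
    // Canonicalize -0 before printing / comparing.
    T cleaned = Traits::remove_negative_zero(v);
    // to_print() returns int for int8_t, so narrow integers print as numbers.
    std::cout << Traits::to_print(cleaned) << " ";
  }
  std::cout << "\n";
}

int main() {
  print_sanitized(std::vector<float>{1.5f, -0.f, 2.25f});
  print_sanitized(std::vector<int8_t>{3, -4, 5});
  return 0;
}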
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Demonstrate CUTLASS debugging tool for dumping fragments and shared memory */ /////////////////////////////////////////////////////////////////////////////////////////////////// // Standard Library includes #include <iostream> // // CUTLASS includes // #include "cutlass/aligned_buffer.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h" #include "cutlass/util/debug.h" #include "cutlass/util/device_dump.h" #define EXAMPLE_MATRIX_ROW 64 #define EXAMPLE_MATRIX_COL 32 /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Element, typename GmemIterator, typename SmemIterator> __global__ void kernel_dump(typename GmemIterator::Params params, typename GmemIterator::TensorRef ref) { extern __shared__ Element shared_storage[]; // Construct the global iterator and load the data to the fragments. int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x; GmemIterator gmem_iterator(params, ref.data(), {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL}, tb_thread_id); typename GmemIterator::Fragment frag; frag.clear(); gmem_iterator.load(frag); // Call dump_fragment() with different parameters. 
if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nAll threads dump all the elements:\n"); cutlass::debug::dump_fragment(frag); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps all the elements:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps first 16 elements:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 16); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps first 16 elements with a stride of 8:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 16, /*S = */ 8); // Construct the shared iterator and store the data to the shared memory. SmemIterator smem_iterator( typename SmemIterator::TensorRef( {shared_storage, SmemIterator::Layout::packed( {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL})}), tb_thread_id); smem_iterator.store(frag); // Call dump_shmem() with different parameters. if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nDump all the elements:\n"); cutlass::debug::dump_shmem(shared_storage, EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nDump all the elements with a stride of 8:\n"); cutlass::debug::dump_shmem( shared_storage, EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL, /*S = */ 8); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point for dump_reg_shmem example. // // usage: // // 02_dump_reg_shmem // int main() { // Initialize a 64x32 column major matrix with sequential data (1,2,3...). using Element = cutlass::half_t; using Layout = cutlass::layout::ColumnMajor; cutlass::HostTensor<Element, Layout> matrix( {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL}); cutlass::reference::host::BlockFillSequential(matrix.host_data(), matrix.capacity()); // Dump the matrix. std::cout << "Matrix:\n" << matrix.host_view() << "\n"; // Copy the matrix to the device. matrix.sync_device(); // Define a global iterator, a shared iterator and their thread map. using ThreadMap = cutlass::transform::PitchLinearWarpRakedThreadMap< cutlass::layout::PitchLinearShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, 32, cutlass::layout::PitchLinearShape<8, 4>, 8>; using GmemIterator = cutlass::transform::threadblock::PredicatedTileIterator< cutlass::MatrixShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, Element, Layout, 1, ThreadMap>; typename GmemIterator::Params params(matrix.layout()); using SmemIterator = cutlass::transform::threadblock::RegularTileIterator< cutlass::MatrixShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, Element, cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<16, 64>, 1, ThreadMap>; dim3 grid(1, 1); dim3 block(32, 1, 1); int smem_size = int(sizeof(Element) * EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL); kernel_dump<Element, GmemIterator, SmemIterator> <<<grid, block, smem_size, 0>>>(params, matrix.device_ref()); cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cout << "Failed" << std::endl; } return (result == cudaSuccess ? 0 : -1); } ///////////////////////////////////////////////////////////////////////////////////////////////////
examples/02_dump_reg_shmem/dump_reg_shmem.cu/0
{ "file_path": "examples/02_dump_reg_shmem/dump_reg_shmem.cu", "repo_id": "examples", "token_count": 2448 }
0
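A hedged sketch of reusing the dumping utilities demonstrated in the example above inside a user-written kernel. Only cutlass::debug::dump_fragment() and the meaning of its parameters as shown in the example (N = number of threads that dump, M = number of elements, S = stride) are taken from the source; the fragment type, the fill pattern and the launch configuration are assumptions made purely for illustration.

#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/util/debug.h"
#include "cutlass/util/device_dump.h"

__global__ void inspect_fragment() {
  // Stand-in for a fragment that a tile iterator would normally have loaded.
  cutlass::Array<cutlass::half_t, 8> frag;
  CUTLASS_PRAGMA_UNROLL
  for (int i = 0; i < 8; ++i) {
    frag[i] = cutlass::half_t(float(threadIdx.x * 8 + i));
  }
  // Mirror the calls in the example: the first thread dumps the first 8
  // elements of its fragment with a stride of 2.
  cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 8, /*S = */ 2);
}

int main() {
  inspect_fragment<<<dim3(1, 1), dim3(32, 1, 1)>>>();
  return (cudaDeviceSynchronize() == cudaSuccess ? 0 : -1);
}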
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to run matrix multiplication kernels using the functions and data structures provided by CUTLASS with tensor cores, which we run on an NVIDIA Turing GPU. Writing a single high performance matrix multiplication kernel is hard but doable, whereas writing high performance kernels at scale, which work for multiple problem sizes with good abstractions, is really hard. CUTLASS solves this problem by providing simplified abstractions to compose the multiple sections of a GEMM kernel. When used properly, the kernels can easily hit peak GPU performance. CUTLASS divides a kernel into hierarchical, composable sections, which means that at each of the thread, warp and thread-block levels, they compute on their own tile size, with higher-level tile sizes being composed from lower-level ones. Multiple thread-tiles (the tile size each thread computes) can be used to form warp-tiles (the tile size each warp computes), and multiple warp tiles can be used to compute a threadblock-tile (the tile size computed by a threadblock). In this example, we split variable initialization into 1. Setting up data properties : describes how matrices are laid out in memory and how the kernel can view them (logical to physical mapping) 2. Setting up computation properties : describes how the above matrices will be used to compute the output of the matrix multiplication. First, we set up the data types of matrices A, B, C and D along with alpha and beta, as the equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leave the rest of the computation to the end of the kernel, as alpha * X + beta * C is a simple element-wise operation on X (A * B) and C. We call this the epilogue of the kernel. 
Hence, we set up the data types for alpha and beta to be ElementComputeEpilogue = int32_t. As we want to use MMA instructions on Turing and they support 8-bit signed integers (int8_t), we use int8_t as the data type of elements in input matrices A and B. Turing also supports accumulation of partial dot products into int32_t, which can store a wider range of numbers, so we use int32_t as the data type of the output matrix elements and of the accumulator. We convey this to the CUTLASS kernel by initializing the template variables ElementAccumulator (int32_t), ElementComputeEpilogue (int32_t), ElementInputA (int8_t), ElementInputB (int8_t), ElementOutput (int32_t). Communicating just the data types is not enough. As the data is laid out linearly in memory, we also have to convey the layout of each matrix. We do that by initializing the template variable LayoutInputA to row major, LayoutInputB to column major and LayoutOutput to row major, matching the declarations below. Next, we set up the rules to compute alpha * X + beta * C, which is called the epilogue of the kernel. We initialize the template variable EpilogueOp, which takes the data type of the output ElementOutput (int32_t), the number of elements per vectorized memory access (128 bits, i.e. 4 int32_t elements), the data type of the accumulator (int32_t) and the data type used to compute the linear combination (alpha * X + beta * C). Now that we have set up the properties of the data, we have to set up the properties of the computation. Second, we create template variables for the tile sizes of the thread-block, warp and mma-op: 128x256x64, 64x64x64 and 8x8x16 (MxNxK) respectively. When these are passed to instantiate the CUTLASS GEMM kernel, it internally deduces the number of threads needed per thread-block, the amount of shared memory, how to store data in a bank-conflict-free manner, and a ton of other variables required to compose, initialize and launch a high performance GEMM kernel. This is the beauty of CUTLASS: it relieves the developer from understanding and coding complicated hardware optimizations which can easily go wrong. CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? MMA pipelines constitute the whole process of loading input data from global memory to shared memory, loading data from shared memory to registers, doing matrix multiplication, and storing to global memory. The flow sequence below shows a typical mma pipeline. matrix in global memory -> registers -> tile in shared memory -> registers -> mma -> registers -> output to global memory The problem with a single pipeline is that each stage is synchronous, which means each stage has to wait until the previous one has finished executing. Some stages in the pipeline do not have a fixed latency, for example the loads from global memory and shared memory. Therefore, we can add one more pipeline with a phase shift in the mma kernel to hide the latency of the global and shared memory loads. Finally, the pipeline in a kernel looks like (1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5) mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) matrix in global memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers -> (9) output to global memory This way, you can hide the latency of the second global memory load by doing computation on already loaded input data. A few more template variables are initialized, such as the threadblock swizzle, which decides which threadblock tile of the output matrix is computed by which threadblock launched on an SM, and the CUDA SM architecture of the GPU you want to run on. 
These are all put together to create a template variable which describes the CUTLASS GEMM kernel using the cutlass::gemm::device::Gemm template. The next step is to initialize physical data, instantiate and initialize the CUTLASS kernel, and run it. We use CUTLASS utilities to initialize, fill and compare matrices as they are simple and do not get in the way of learning CUTLASS. Once all the matrices are initialized and filled with data, we create an arguments tuple to launch the CUTLASS kernel, which takes the problem size (M = 5120, N = 4096 and K = 4096), the matrices, alpha, beta and, importantly, the split k-dimension factor. Along with that, we query CUTLASS whether the kernel we instantiated requires any scratch-space memory. If so, we allocate it and pass it along with the other arguments to initialize the CUTLASS kernel; then the kernel is launched. Later in this example, we launch a reference gemm kernel (from the CUTLASS utilities) to check that the output of the CUTLASS kernel matches that of the reference GEMM kernel. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = int32_t; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = int8_t; // <- data type of elements in input matrix A using ElementInputB = int8_t; // <- data type of elements in input matrix B using ElementOutput = int32_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. Row Major for // Matrix A, Column Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm75; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 256, 64>; // <- threadblock tile M = 128, N = 256, K = 64 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 64>; // <- warp tile M = 64, N = 64, K = 64 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 16>; // <- MMA Op tile M = 8, N = 8, K = 16 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. 
This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 2; using Gemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; int run() { const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { bool notSupported = false; // Turing Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 75)) { std::cerr << "Turing Tensor Core operations must be run on a machine with compute capability at least 75." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } return run(); }
examples/08_turing_tensorop_gemm/turing_tensorop_gemm.cu/0
{ "file_path": "examples/08_turing_tensorop_gemm/turing_tensorop_gemm.cu", "repo_id": "examples", "token_count": 6256 }
1
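The long comment in the example above describes the computation as D = alpha * (A * B) + beta * C with int8_t inputs, int32_t accumulation, and an element-wise epilogue. The host-only sketch below spells that arithmetic out; the naive triple loop, the container choices and the function name are illustrative assumptions and say nothing about how CUTLASS actually schedules the kernel on the GPU.

#include <cstdint>
#include <vector>

void reference_gemm_s8_s32(int M, int N, int K,
                           int32_t alpha,
                           std::vector<int8_t> const &A,   // row-major M x K
                           std::vector<int8_t> const &B,   // column-major K x N
                           int32_t beta,
                           std::vector<int32_t> const &C,  // row-major M x N
                           std::vector<int32_t> &D) {      // row-major M x N
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N; ++n) {
      int32_t accum = 0;  // accumulate in int32_t, as the kernel does
      for (int k = 0; k < K; ++k) {
        accum += int32_t(A[m * K + k]) * int32_t(B[n * K + k]);
      }
      // The epilogue is a simple element-wise linear combination.
      D[m * N + n] = alpha * accum + beta * C[m * N + n];
    }
  }
}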
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/gemm/device/default_gemm_configuration.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "kernel/b2b_gemm.h" #include "kernel/default_b2b_gemm.h" #include "kernel/default_b2b_gemm_smem_accumulator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Element type for internal accumulation typename ElementAccumulator_ = ElementC_, /// Operator class tag typename OperatorClass_ = arch::OpClassSimt, /// Tag indicating architecture to tune for typename ArchTag_ = arch::Sm70, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape0_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::ThreadblockShape, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape1_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape0_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::WarpShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape1_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::InstructionShape, /// Epilogue output operator typename EpilogueOutputOp0_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::EpilogueOutputOp, /// Epilogue output operator typename EpilogueOutputOp1_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>, /// Number of stages used in the pipelined mainloop int Stages = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kStages, /// Stage accumulator in shared memory bool SmemAccumulator = false, /// Access granularity of A matrix in units of elements int AlignmentA = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentA, /// Access granularity of B matrix in units of elements int AlignmentB = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentB, /// Operation performed by GEMM typename Operator_ = typename DefaultGemmConfiguration< 
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::Operator> class B2bGemm { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using TensorRefA = TensorRef<ElementA const, LayoutA>; using ElementB = ElementB_; using LayoutB = LayoutB_; using TensorRefB = TensorRef<ElementB const, LayoutB>; using ElementC = ElementC_; using LayoutC = LayoutC_; using TensorRefC = TensorRef<ElementC const, LayoutC>; using TensorRefD = TensorRef<ElementC, LayoutC>; using ElementAccumulator = ElementAccumulator_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; using ThreadblockShape0 = ThreadblockShape0_; using ThreadblockShape1 = ThreadblockShape1_; using WarpShape0 = WarpShape0_; using WarpShape1 = WarpShape1_; using InstructionShape = InstructionShape_; using EpilogueOutputOp0 = EpilogueOutputOp0_; using EpilogueOutputOp1 = EpilogueOutputOp1_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentB = AlignmentB; static int const kAlignmentC = EpilogueOutputOp1::kCount; static ComplexTransform const kTransformA = ComplexTransform::kNone; static ComplexTransform const kTransformB = ComplexTransform::kNone; /// Derived types using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute; using LayoutScaleBias = layout::RowMajor; /// Define the kernel using B2bGemmKernel = typename kernel::DefaultB2bGemm< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, kStages, Operator, SmemAccumulator >::B2bGemmKernel; using Arguments = typename B2bGemmKernel::Arguments; private: /// Kernel parameters object typename B2bGemmKernel::Params params_; public: /// Constructs the GEMM. B2bGemm() { } /// Determines whether the GEMM can execute the given problem. static Status can_implement(Arguments const &args) { Status status = B2bGemmKernel::can_implement( args.problem_size_0, args.problem_size_1, args.ref_A0.non_const_ref(), args.ref_B0.non_const_ref(), args.ref_C0.non_const_ref(), args.ref_B1.non_const_ref(), args.ref_C1.non_const_ref(), args.ref_D1 ); if (status != Status::kSuccess) { return status; } return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { size_t bytes = 0; // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape( args.problem_size_0, {ThreadblockShape0::kM, ThreadblockShape0::kN, ThreadblockShape0::kK}, args.batch_count); return bytes; } /// Initializes GEMM state from arguments. 
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape( args.problem_size_0, {ThreadblockShape0::kM, ThreadblockShape0::kN, ThreadblockShape0::kK}, args.batch_count); // cutlass::gemm::GemmCoord grid_shape_1 = threadblock_swizzle.get_tiled_shape( // args.problem_size_1, // {ThreadblockShape1::kM, ThreadblockShape1::kN, ThreadblockShape1::kK}, // args.batch_count); // Initialize the Params structure params_ = typename B2bGemmKernel::Params{ args.mode, args.problem_size_0, args.problem_size_1, grid_shape, args.ref_A0.non_const_ref(), args.ref_B0.non_const_ref(), args.ref_C0.non_const_ref(), args.ref_Scale0.non_const_ref(), args.ref_Bias0.non_const_ref(), args.ref_B1.non_const_ref(), args.ref_C1.non_const_ref(), args.ref_D1, args.batch_stride_A0, args.batch_stride_B0, args.batch_stride_B1, args.batch_stride_C1, args.batch_stride_D1, args.batch_stride_Bias0, args.batch_stride_Scale0, args.epilogue0, args.epilogue1, static_cast<int *>(workspace), }; return Status::kSuccess; } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { params_.ref_A0.reset(args.ref_A0.non_const_ref().data()); params_.ref_B0.reset(args.ref_B0.non_const_ref().data()); params_.ref_C0.reset(args.ref_C0.non_const_ref().data()); params_.ref_Scale0.reset(args.ref_Scale0.non_const_ref().data()); params_.ref_Bias0.reset(args.ref_Bias0.non_const_ref().data()); params_.ref_B1.reset(args.ref_B1.non_const_ref().data()); params_.ref_C1.reset(args.ref_C1.non_const_ref().data()); params_.ref_D1.reset(args.ref_D1.data()); params_.output_op_0 = args.epilogue0; params_.output_op_1 = args.epilogue1; params_.semaphore = static_cast<int *>(workspace); return Status::kSuccess; } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(B2bGemmKernel::kThreadCount, 1, 1); cudaError_t result; int smem_size = int(sizeof(typename B2bGemmKernel::SharedStorage)); if (smem_size >= (48 << 10)) { result = cudaFuncSetAttribute(Kernel<B2bGemmKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } cutlass::Kernel<B2bGemmKernel><<<grid, block, smem_size, stream>>>(params_); result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; } // namespace device } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
examples/13_two_tensor_op_fusion/device/b2b_gemm.h/0
{ "file_path": "examples/13_two_tensor_op_fusion/device/b2b_gemm.h", "repo_id": "examples", "token_count": 4594 }
2
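A hedged sketch of the host-side call sequence the device-level B2bGemm wrapper above expects: can_implement(), get_workspace_size(), initialize(), then run(). The helper name and the choice to template over the concrete B2bGemm instantiation are assumptions; construction of the Arguments struct is elided because it is defined in kernel/b2b_gemm.h, which is not shown here.

#include "cutlass/cutlass.h"
#include "cutlass/util/device_memory.h"

// B2bGemmOp is assumed to be a concrete instantiation of
// cutlass::gemm::device::B2bGemm<...>, such as those used in example 13.
template <typename B2bGemmOp>
cutlass::Status run_b2b_gemm(typename B2bGemmOp::Arguments const &args,
                             cudaStream_t stream = nullptr) {
  // 1. Ask the kernel whether the two chained problem sizes are supported.
  cutlass::Status status = B2bGemmOp::can_implement(args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // 2. Allocate whatever scratch space the kernel reports that it needs.
  size_t workspace_bytes = B2bGemmOp::get_workspace_size(args);
  cutlass::device_memory::allocation<uint8_t> workspace(workspace_bytes);

  // 3. Build the internal Params from the arguments, then launch.
  B2bGemmOp b2b_op;
  status = b2b_op.initialize(args, workspace.get(), stream);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }
  return b2b_op.run(stream);
}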
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a multistage threadblock-scoped Implicit GEMM Convolution kernel. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/cache_operation.h" #include "cutlass/gemm/threadblock/mma_base.h" #include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h" #include "threadblock/b2b_mma_base.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape0_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA0_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA0_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA0, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB0_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB0_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB0, /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape1_, /// Iterates over the intermediate accumulator tile // (concept::MmaTensorOpFragmentIterator) typename FragmentIteratorA1_, /// Iterates over vectors of scale and bias vector in global memory // (concept: VectorIterator) typename IteratorAccumulatorScaleBias_, /// WarpIterator to load Scale or Bias vector from threadblock fragment typename FragmentIteratorA1ScaleBias_, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB1_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB1_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB1, /// Output operator for 1st Gemm(concept: epilogue::thread::LinearCombinationClamp, etc...) typename OutputOp_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy0_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy1_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class B2bImplicitGemmMultistage : public gemm::threadblock::B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages> { public: ///< Base class using Base = gemm::threadblock::B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages>; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape0 = Shape0_; ///< Iterates over tiles of A operand in global memory using IteratorA0 = IteratorA0_; ///< Iterates over tiles of B operand in global memory using IteratorB0 = IteratorB0_; ///< Policy describing tuning details using Policy0 = Policy0_; using SmemIteratorA0 = SmemIteratorA0_; using SmemIteratorB0 = SmemIteratorB0_; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape1 = Shape1_; ///< Iterates over tiles of A operand in global memory using FragmentIteratorA1 = FragmentIteratorA1_; ///< Iterates over tiles of the scale and bias vectors in global memory using IteratorAccumulatorScaleBias = IteratorAccumulatorScaleBias_; ///< WarpIterator to load Scale or Bias vector from threadblock fragment using FragmentIteratorA1ScaleBias = FragmentIteratorA1ScaleBias_; ///< Iterates over tiles of B operand in global memory using IteratorB1 = IteratorB1_; ///< Policy describing tuning details using Policy1 = Policy1_; using SmemIteratorB1 = SmemIteratorB1_; ///< Epilogue after 1st Gemm using OutputOp = OutputOp_; static const bool PerChannelScale = (OutputOp::kScale == epilogue::thread::ScaleType::OnlyAlphaPerChannelScaling); static cutlass::arch::CacheOperation::Kind const 
kCacheOpA0 = CacheOpA0; static cutlass::arch::CacheOperation::Kind const kCacheOpB0 = CacheOpB0; static cutlass::arch::CacheOperation::Kind const kCacheOpB1 = CacheOpB1; // // Dependent types // using ElementC = typename Policy0::Operator::ElementC; /// Fragment of accumulator tile using FragmentC0 = typename Policy0::Operator::FragmentC; /// Warp-level Mma using Operator0 = typename Policy0::Operator; /// Fragment of Scale and Bias loaded from global memory using FragmentA1ScaleBias = typename IteratorAccumulatorScaleBias::Fragment; /// Fragment of accumulator tile using FragmentC1 = typename Policy1::Operator::FragmentC; /// Warp-level Mma using Operator1 = typename Policy1::Operator; /// Internal structure exposed for introspection. struct Detail { static_assert(Base::kWarpGemmIterations0 > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); static_assert(Base::kWarpGemmIterations1 > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); /// Number of cp.async instructions to load one stage of operand A static int const AsyncCopyIterationsPerStageA0 = IteratorA0::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB0 = IteratorB0::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB1 = IteratorB1::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load on group of operand A static int const kAccessesPerGroupA0 = (AsyncCopyIterationsPerStageA0 + Base::kWarpGemmIterations0 - 1) / Base::kWarpGemmIterations0; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB0 = (AsyncCopyIterationsPerStageB0 + Base::kWarpGemmIterations0 - 1) / Base::kWarpGemmIterations0; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB1 = (AsyncCopyIterationsPerStageB1 + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1; }; private: using WarpLoadedFragmentA0 = typename Operator0::FragmentA; using WarpLoadedFragmentB0 = typename Operator0::FragmentB; /// Warp Fragment of operand A1 loaded from accmulator tile using WarpLoadedFragmentA1 = typename FragmentIteratorA1::Fragment; using WarpLoadedFragmentA1ScaleBias = typename FragmentIteratorA1ScaleBias::Fragment; using WarpLoadedFragmentB1 = typename Operator1::FragmentB; using WarpTransformedFragmentA0 = typename Operator0::TransformedFragmentA; using WarpTransformedFragmentB0 = typename Operator0::TransformedFragmentB; using WarpTransformedFragmentA1 = typename Operator1::TransformedFragmentA; using WarpTransformedFragmentB1 = typename Operator1::TransformedFragmentB; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA0 smem_iterator_A0_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB0 smem_iterator_B0_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB1 smem_iterator_B1_; public: /// Construct from tensor references CUTLASS_DEVICE B2bImplicitGemmMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::B2bMmaSharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int 
lane_idx ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A0_(shared_storage.shared_storage0.operand_A_ref(), thread_idx), smem_iterator_B0_(shared_storage.shared_storage0.operand_B_ref(), thread_idx), smem_iterator_B1_(shared_storage.shared_storage1.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN); int warp_idx_k = warp_idx / (Base::WarpCount0::kM * Base::WarpCount0::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount0::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount0::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A0_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations0 * warp_idx_k}); this->warp_tile_iterator_B0_.add_tile_offset( {Base::kWarpGemmIterations0 * warp_idx_k, warp_idx_n}); this->warp_tile_iterator_B1_.add_tile_offset( {Base::kWarpGemmIterations1 * warp_idx_k, warp_idx_n}); } CUTLASS_DEVICE void copy_tiles_and_advance_0( IteratorA0 &iterator_A0, IteratorB0 &iterator_B0, int group_start_A0 = 0, int group_start_B0 = 0) { iterator_A0.set_iteration_index(group_start_A0); this->smem_iterator_A0_.set_iteration_index(group_start_A0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA0; ++j) { if (group_start_A0 + j < Detail::AsyncCopyIterationsPerStageA0) { typename IteratorA0::AccessType *dst_ptr = reinterpret_cast<typename IteratorA0::AccessType *>( this->smem_iterator_A0_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA0::Element>::value * IteratorA0::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA0>( dst_ptr, iterator_A0.get(), iterator_A0.valid()); ++iterator_A0; ++this->smem_iterator_A0_; } } iterator_B0.set_iteration_index(group_start_B0); this->smem_iterator_B0_.set_iteration_index(group_start_B0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB0; ++j) { if (group_start_B0 + j < Detail::AsyncCopyIterationsPerStageB0) { typename IteratorB0::AccessType *dst_ptr = reinterpret_cast<typename IteratorB0::AccessType *>( this->smem_iterator_B0_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB0::Element>::value * IteratorB0::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB0>( dst_ptr, iterator_B0.get(), iterator_B0.valid()); ++iterator_B0; ++this->smem_iterator_B0_; } } } CUTLASS_DEVICE void copy_tiles_and_advance_1( IteratorB1 &iterator_B1, int group_start_B1 = 0) { iterator_B1.set_iteration_index(group_start_B1); this->smem_iterator_B1_.set_iteration_index(group_start_B1); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB1; ++j) { if (group_start_B1 + j < Detail::AsyncCopyIterationsPerStageB1) { typename IteratorB1::AccessType *dst_ptr = reinterpret_cast<typename IteratorB1::AccessType *>( this->smem_iterator_B1_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value * IteratorB1::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>( dst_ptr, iterator_B1.get(), iterator_B1.valid()); ++iterator_B1; ++this->smem_iterator_B1_; } } } /// Perform a threadblock-scoped 
matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations_0, ///< destination accumulator tile FragmentC1 &accum, ///< iterator over A0 operand in global memory IteratorA0 iterator_A0, ///< iterator over B0 operand in global memory IteratorB0 iterator_B0, ///< iterator over A1 operand scale vector in global memory IteratorAccumulatorScaleBias iterator_A1_scale, ///< iterator over A1 operand bias vector in global memory IteratorAccumulatorScaleBias iterator_A1_bias, ///< iterator over B1 operand in global memory IteratorB1 iterator_B1, ///< initial value of accumulator FragmentC0 const &src_accum, ///< epilogue operation after 1st Gemm OutputOp output_op_0, ///< Imaginary strides used for planar-complex only - ignored here int64_t imag_stride_A = 0, int64_t imag_stride_B = 0) { // // Prologue // // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations_0) { iterator_A0.set_iteration_index(0); this->smem_iterator_A0_.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA0; ++j) { typename IteratorA0::AccessType *dst_ptr = reinterpret_cast<typename IteratorA0::AccessType *>( this->smem_iterator_A0_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA0::Element>::value * IteratorA0::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA0>( dst_ptr, iterator_A0.get(), iterator_A0.valid()); ++iterator_A0; ++this->smem_iterator_A0_; } iterator_B0.set_iteration_index(0); this->smem_iterator_B0_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB0; ++j) { typename IteratorB0::AccessType *dst_ptr = reinterpret_cast<typename IteratorB0::AccessType *>( this->smem_iterator_B0_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB0::Element>::value * IteratorB0::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB0>( dst_ptr, iterator_B0.get(), iterator_B0.valid()); ++iterator_B0; ++this->smem_iterator_B0_; } // Move to the next stage iterator_A0.advance(); iterator_B0.advance(); this->smem_iterator_A0_.add_tile_offset({0, 1}); this->smem_iterator_B0_.add_tile_offset({1, 0}); // Inserts a fence to group cp.async instructions into stages. cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand FragmentC0 accum0 = src_accum; // Waits until kStages-2 stages have committed. 
cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA0 warp_loaded_frag_A0[2]; WarpLoadedFragmentB0 warp_loaded_frag_B0[2]; WarpTransformedFragmentA0 warp_transformed_frag_A0[2]; WarpTransformedFragmentB0 warp_transformed_frag_B0[2]; Operator0 warp_mma0; this->warp_tile_iterator_A0_.set_kgroup_index(0); this->warp_tile_iterator_B0_.set_kgroup_index(0); this->warp_tile_iterator_A0_.load(warp_loaded_frag_A0[0]); this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[0]); ++this->warp_tile_iterator_A0_; ++this->warp_tile_iterator_B0_; // Start issuing the first group of the next stage outside of the mainloop copy_tiles_and_advance_0(iterator_A0, iterator_B0); int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma0.transform(warp_transformed_frag_A0[0], warp_transformed_frag_B0[0], warp_loaded_frag_A0[0], warp_loaded_frag_B0[0]); // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations_0 > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0); this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0); this->warp_tile_iterator_A0_.load(warp_loaded_frag_A0[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A0_; ++this->warp_tile_iterator_B0_; if (warp_mma_k > 0) warp_mma0.transform(warp_transformed_frag_A0[warp_mma_k % 2], warp_transformed_frag_B0[warp_mma_k % 2], warp_loaded_frag_A0[warp_mma_k % 2], warp_loaded_frag_B0[warp_mma_k % 2]); // Issue global->shared copies for the next stage int group_start_iteration_A0, group_start_iteration_B0; if (warp_mma_k + 1 == Base::kWarpGemmIterations0) { group_start_iteration_A0 = 0; group_start_iteration_B0 = 0; } else { group_start_iteration_A0 = (warp_mma_k + 1) * Detail::kAccessesPerGroupA0; group_start_iteration_B0 = (warp_mma_k + 1) * Detail::kAccessesPerGroupB0; } copy_tiles_and_advance_0(iterator_A0, iterator_B0, group_start_iteration_A0, group_start_iteration_B0); warp_mma0( accum0, warp_transformed_frag_A0[warp_mma_k % 2], warp_transformed_frag_B0[warp_mma_k % 2], accum0 ); if (warp_mma_k + 1 == Base::kWarpGemmIterations0) warp_mma0.transform(warp_transformed_frag_A0[(warp_mma_k + 1) % 2], warp_transformed_frag_B0[(warp_mma_k + 1) % 2], warp_loaded_frag_A0[(warp_mma_k + 1) % 2], warp_loaded_frag_B0[(warp_mma_k + 1) % 2]); if (warp_mma_k + 2 == Base::kWarpGemmIterations0) { // Inserts a fence to group cp.async instructions into stages. 
cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages of cp.async have committed arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A0.advance(); iterator_B0.advance(); this->smem_iterator_A0_.add_tile_offset({0, 1}); this->smem_iterator_B0_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A0_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B0_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A0_.add_tile_offset( {0, -Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0}); this->warp_tile_iterator_B0_.add_tile_offset( {-Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations_0; } } } // Insert fence and wait for all outstanding cp.async operations to commit. cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); // 2nd Implicit Gemm /// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile FragmentIteratorA1 warp_tile_iterator_A1_(accum0); FragmentA1ScaleBias tb_frag_A1_scale; FragmentA1ScaleBias tb_frag_A1_bias; FragmentIteratorA1ScaleBias warp_tile_iterator_A1_scale_(tb_frag_A1_scale); FragmentIteratorA1ScaleBias warp_tile_iterator_A1_bias_(tb_frag_A1_bias); if(PerChannelScale) { tb_frag_A1_scale.clear(); iterator_A1_scale.load(tb_frag_A1_scale); ++iterator_A1_scale; } tb_frag_A1_bias.clear(); iterator_A1_bias.load(tb_frag_A1_bias); ++iterator_A1_bias; // // Prologue // int gemm_k_iterations_1 = FragmentIteratorA1::Policy::kIterations / Base::kWarpGemmIterations1; // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations_1) { iterator_B1.set_iteration_index(0); this->smem_iterator_B1_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB1; ++j) { typename IteratorB1::AccessType *dst_ptr = reinterpret_cast<typename IteratorB1::AccessType *>( this->smem_iterator_B1_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value * IteratorB1::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>( dst_ptr, iterator_B1.get(), iterator_B1.valid()); ++iterator_B1; ++this->smem_iterator_B1_; } // Move to the next stage iterator_B1.advance(); this->smem_iterator_B1_.add_tile_offset({1, 0}); // Inserts a fence to group cp.async instructions into stages. cutlass::arch::cp_async_fence(); } // Waits until kStages-2 stages have committed. 
cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA1 warp_loaded_frag_A1[2]; WarpLoadedFragmentA1ScaleBias warp_loaded_frag_A1_scale[2]; WarpLoadedFragmentA1ScaleBias warp_loaded_frag_A1_bias[2]; WarpLoadedFragmentB1 warp_loaded_frag_B1[2]; WarpTransformedFragmentA1 warp_transformed_frag_A1[2]; WarpTransformedFragmentB1 warp_transformed_frag_B1[2]; Operator1 warp_mma1; if(PerChannelScale) { warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[0]); ++warp_tile_iterator_A1_scale_; } warp_tile_iterator_A1_bias_.load(warp_loaded_frag_A1_bias[0]); ++warp_tile_iterator_A1_bias_; warp_tile_iterator_A1_.load(warp_loaded_frag_A1[0], warp_loaded_frag_A1_scale[0], warp_loaded_frag_A1_bias[0], output_op_0); ++warp_tile_iterator_A1_; this->warp_tile_iterator_B1_.set_kgroup_index(0); this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[0]); ++this->warp_tile_iterator_B1_; // Start issuing the first group of the next stage outside of the mainloop copy_tiles_and_advance_1(iterator_B1); smem_write_stage_idx = Base::kStages - 1; smem_read_stage_idx = 0; warp_mma1.transform(warp_transformed_frag_A1[0], warp_transformed_frag_B1[0], warp_loaded_frag_A1[0], warp_loaded_frag_B1[0]); // // Mainloop // CUTLASS_PRAGMA_UNROLL for (gemm_k_iterations_1 = FragmentIteratorA1::Policy::kIterations / Base::kWarpGemmIterations1 - (Base::kStages - 1); gemm_k_iterations_1 > (-Base::kStages + 1); gemm_k_iterations_1--) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) { // Load threadblock-level scale/bias vector from global memory if (warp_mma_k + 1 == Base::kWarpGemmIterations1) { if(PerChannelScale) { tb_frag_A1_scale.clear(); iterator_A1_scale.load(tb_frag_A1_scale); ++iterator_A1_scale; } tb_frag_A1_bias.clear(); iterator_A1_bias.load(tb_frag_A1_bias); ++iterator_A1_bias; } // Load warp-level scale bias fragment from threadblock scale/bias vector if(PerChannelScale) { warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]); ++warp_tile_iterator_A1_scale_; } warp_tile_iterator_A1_bias_.load(warp_loaded_frag_A1_bias[(warp_mma_k + 1) % 2]); ++warp_tile_iterator_A1_bias_; // Load warp-level tile from accumulator fragment warp_tile_iterator_A1_.load(warp_loaded_frag_A1[(warp_mma_k + 1) % 2], warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2], warp_loaded_frag_A1_bias[(warp_mma_k + 1) % 2], output_op_0); ++warp_tile_iterator_A1_; // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. 
this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1); this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_B1_; if (warp_mma_k > 0) warp_mma1.transform(warp_transformed_frag_A1[warp_mma_k % 2], warp_transformed_frag_B1[warp_mma_k % 2], warp_loaded_frag_A1[warp_mma_k % 2], warp_loaded_frag_B1[warp_mma_k % 2]); // Issue global->shared copies for the next stage int group_start_iteration_B1; if (warp_mma_k + 1 == Base::kWarpGemmIterations1) { group_start_iteration_B1 = 0; } else { group_start_iteration_B1 = (warp_mma_k + 1) * Detail::kAccessesPerGroupB1; } copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1); warp_mma1( accum, warp_transformed_frag_A1[warp_mma_k % 2], warp_transformed_frag_B1[warp_mma_k % 2], accum ); if (warp_mma_k + 1 == Base::kWarpGemmIterations1) warp_mma1.transform(warp_transformed_frag_A1[(warp_mma_k + 1) % 2], warp_transformed_frag_B1[(warp_mma_k + 1) % 2], warp_loaded_frag_A1[(warp_mma_k + 1) % 2], warp_loaded_frag_B1[(warp_mma_k + 1) % 2]); if (warp_mma_k + 2 == Base::kWarpGemmIterations1) { // Inserts a fence to group cp.async instructions into stages. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages of cp.async have committed arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_B1.advance(); this->smem_iterator_B1_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_B1_.add_tile_offset( {-Base::kStages * Policy1::kPartitionsK * Base::kWarpGemmIterations1, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } } } } // Insert fence and wait for all outstanding cp.async operations to commit. cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
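// ---------------------------------------------------------------------------
// Editorial note: the mainloops above cycle `smem_write_stage_idx` and
// `smem_read_stage_idx` through a circular buffer of Base::kStages shared
// memory tiles, rewinding the shared-memory and warp-tile iterators with a
// negative tile offset whenever an index wraps. The standalone, host-side
// sketch below illustrates only that bookkeeping; the namespace and function
// names are invented for this note and are not part of the CUTLASS API.
namespace b2b_multistage_notes {

// Advances a circular stage index. Returns true when the index wrapped back
// to zero, which is the point at which the kernel above applies a negative
// tile offset (e.g. -kStages, or -kStages * kPartitionsK * kWarpGemmIterations)
// to return its iterators to the start of the circular buffer.
inline bool advance_stage_index(int &stage_idx, int num_stages) {
  if (stage_idx == num_stages - 1) {
    stage_idx = 0;
    return true;
  }
  ++stage_idx;
  return false;
}

} // namespace b2b_multistage_notes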
examples/13_two_tensor_op_fusion/threadblock/b2b_implicit_gemm_multistage.h/0
{ "file_path": "examples/13_two_tensor_op_fusion/threadblock/b2b_implicit_gemm_multistage.h", "repo_id": "examples", "token_count": 13619 }
3
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** Please check example 07, 08 and 17 for the basics of dense tensor op gemm kernels. NVIDIA Ampere architecture also supports structured sparse tensor op for tf32, fp16, int8 and int4. Sparse GEMM kernels needs to takes an additional E matrix which stores the meta data. The format of meta data is different for every data types. CUTLASS templates can automatically infer it based on input A and B. Check code below. Moreover, matrix E needs to be preprocessed so that it can use ldmatrix to load into the registers efficiently. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_sparse.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/host_reorder.h" #include "cutlass/util/host_uncompress.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = int32_t; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::int4b_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::int4b_t; // <- data type of elements in input matrix B using ElementOutput = int32_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. 
Row Major for // Matrix A, Column Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 256>; // <- threadblock tile M = 128, N = 128, K = 256 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 256>; // <- warp tile M = 64, N = 64, K = 256 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 128>; // <- MMA Op tile M = 16, N = 8, K = 128 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 3; using Gemm = cutlass::gemm::device::SparseGemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; // Data type and layout of meta data matrix E can be inferred from template Gemm. 
using ElementInputE = typename Gemm::ElementE;
using LayoutInputE = cutlass::layout::RowMajor;
using ReorderedLayoutInputE = typename Gemm::LayoutE;

// Below property is defined in include/cutlass/arch/sp_mma_sm80.h
// 50% Sparsity on Ampere
constexpr int kSparse = Gemm::kSparse;

// How many elements of A are covered per ElementE
constexpr int kElementsPerElementE = Gemm::kElementsPerElementE;

// The size of an individual meta data element, in bits
constexpr int kMetaSizeInBits = Gemm::kMetaSizeInBits;

int run() {
  const int length_m = 512;
  const int length_n = 512;
  const int length_k = 1024;

  // Create a tuple of problem size for matrix multiplication
  cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);

  // Initialize tensors using CUTLASS helper functions
  cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
      cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse));  // <- Create matrix A with dimensions M x (K / 2)
  cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a_uncompressed(
      problem_size.mk());  // <- Create uncompressed matrix A with dimensions M x K for reference computing
  cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
      problem_size.kn());  // <- Create matrix B with dimensions K x N
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
      problem_size.mn());  // <- Create matrix C with dimensions M x N
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
      problem_size.mn());  // <- Create matrix D with dimensions M x N used to store output from
                           //    CUTLASS kernel
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
      problem_size.mn());  // <- Create matrix D with dimensions M x N used to store output from
                           //    reference kernel

  // Create matrix E with dimensions M x (K / 2 / kElementsPerElementE). This one is used by reference computing.
  cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e(
      cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
  // Same size as the above. The above one needs to be reordered and stored in this one.
  cutlass::HostTensor<ElementInputE, ReorderedLayoutInputE> tensor_e_reordered(
      cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));

  // Fill input and output matrices on host using CUTLASS helper functions
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_a.host_view(), 1, ElementInputA(2), ElementInputA(-2),
      0);  // <- Fill matrix A on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_b.host_view(), 1, ElementInputB(2), ElementInputB(-2),
      0);  // <- Fill matrix B on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_c.host_view(), 1, ElementOutput(2), ElementOutput(-2),
      0);  // <- Fill matrix C on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomSparseMeta(
      tensor_e.host_view(), 1,
      kMetaSizeInBits);  // <- Fill matrix E on host with uniform-distribution random meta data
  cutlass::reference::host::TensorFill(
      tensor_d.host_view());  // <- fill matrix D on host with zeros
  cutlass::reference::host::TensorFill(
      tensor_ref_d.host_view());  // <- fill matrix D for reference on host with zeros

  // Reorder the meta data matrix so that we can use ldmatrix to load it into tensor core
  // instructions.
cutlass::reorder_meta(tensor_e_reordered.host_ref(), tensor_e.host_ref(), {problem_size.m(), problem_size.n(), problem_size.k() / kSparse / kElementsPerElementE}); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_e_reordered.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device tensor_e_reordered.device_ref(), // <- reference to matrix E on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // uncompress tensor_a based on meta data tensor_e. We need it for reference computing. cutlass::uncompress(tensor_a_uncompressed.host_ref(), tensor_a.host_ref(), tensor_e.host_ref(), problem_size.m(), problem_size.k()); // Create instantiation for host reference gemm kernel cutlass::reference::host::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue, typename Gemm::Operator> gemm_host; // Launch host reference gemm kernel gemm_host(problem_size, alpha, tensor_a_uncompressed.host_ref(), tensor_b.host_ref(), beta, tensor_c.host_ref(), tensor_ref_d.host_ref()); // Copy output data from CUTLASS host for comparison tensor_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { bool notSupported = false; // Ampere Sparse Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.1. // // CUTLASS must be compiled with CUDA 11.1 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.1 Toolkit or later." 
<< std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (props.major * 10 + props.minor < 80) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } return run(); }
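// ---------------------------------------------------------------------------
// Editorial note: the tensor extents used in run() follow directly from the
// sparsity constants exposed by the Gemm template: the compressed A operand
// is M x (K / kSparse) and the metadata tensor E is
// M x (K / kSparse / kElementsPerElementE). The helper below is an
// illustrative sketch added for this write-up; the namespace, struct, and
// function names are invented and are not part of the original example.
namespace sparse_gemm_notes {

struct SparseExtents {
  int a_rows, a_cols;  // compressed A: M x (K / kSparse)
  int e_rows, e_cols;  // metadata  E: M x (K / kSparse / kElementsPerElementE)
};

// Recomputes the compressed-A and metadata-E extents from the GEMM problem
// size, using the same constants the example uses above.
inline SparseExtents sparse_extents(cutlass::gemm::GemmCoord const &problem) {
  return SparseExtents{
      problem.m(), problem.k() / kSparse,
      problem.m(), problem.k() / kSparse / kElementsPerElementE};
}

} // namespace sparse_gemm_notes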
examples/15_ampere_sparse_tensorop_gemm/ampere_sparse_tensorop_gemm.cu/0
{ "file_path": "examples/15_ampere_sparse_tensorop_gemm/ampere_sparse_tensorop_gemm.cu", "repo_id": "examples", "token_count": 5908 }
4
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include <iostream> #include <fstream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/conv/kernel/default_conv2d_fprop.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/convolution.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output tensors and computation between // elements using Element = cutlass::Quaternion<float>; using ElementAccumulator = Element; // Data type of accumulator using ElementComputeEpilogue = Element; // Data type of epilogue computation (alpha, beta) using ElementInputA = Element; // Data type of elements in input tensor using ElementInputB = Element; // Data type of elements in input tensor using ElementOutput = Element; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassSimt; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm50; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; // Threadblock tile shape // 
This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; // SIMT instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // Number of pipelines you want to use constexpr int NumStages = 2; // This code section describe iterator algorithm selected is Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; // Data type for alpha/beta in linear combination using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm >::Kernel; using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::Tensor4DCoord input_size; cutlass::Tensor4DCoord filter_size; cutlass::Tensor4DCoord padding; cutlass::MatrixCoord conv_stride; cutlass::MatrixCoord dilation; bool reference_check; bool measure_performance; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; bool benchmark; std::string tag; Options(): help(false), input_size(1, 32, 32, 32), filter_size(32, 3, 3, 32), padding(1, 1, 1, 1), conv_stride(1, 1), dilation(1, 1), reference_check(false), measure_performance(true), iterations(20), save_workspace(false), alpha(1), beta(0), benchmark(false) { } // Verify the problem size is compatible with the CUTLASS Convolution implementation. bool valid() { // // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 8 elements. 
// int const kAlignment = 8; if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) { // misaligned tensors return false; } // Invalid padding if ((padding.h() != filter_size.h() / 2) || (padding.w() != filter_size.w() / 2)) { return false; } return true; } /// Updates input and filter sizes void update( cutlass::Tensor4DCoord input_size, cutlass::Tensor4DCoord filter_size) { this->input_size = input_size; this->filter_size = filter_size; padding.n() = filter_size.h() / 2; padding.h() = filter_size.h() / 2; padding.w() = filter_size.w() / 2; padding.c() = filter_size.w() / 2; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("ref-check")) { reference_check = true; } if (cmd.check_cmd_line_flag("perf-check")) { measure_performance = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } cmd.get_cmd_line_argument("n", input_size.n()); cmd.get_cmd_line_argument("h", input_size.h()); cmd.get_cmd_line_argument("w", input_size.w()); cmd.get_cmd_line_argument("c", input_size.c()); cmd.get_cmd_line_argument("k", filter_size.n()); cmd.get_cmd_line_argument("r", filter_size.h()); cmd.get_cmd_line_argument("s", filter_size.w()); filter_size.c() = input_size.c(); cmd.get_cmd_line_argument("alpha_w", alpha.w()); cmd.get_cmd_line_argument("alpha_x", alpha.x()); cmd.get_cmd_line_argument("alpha_y", alpha.y()); cmd.get_cmd_line_argument("alpha_z", alpha.z()); cmd.get_cmd_line_argument("beta_w", beta.w()); cmd.get_cmd_line_argument("beta_x", beta.x()); cmd.get_cmd_line_argument("beta_y", beta.y()); cmd.get_cmd_line_argument("beta_z", beta.z()); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); if (filter_size.h() == 3 && filter_size.w() == 3) { padding = {1, 1, 1, 1}; } else { filter_size.h() = 1; filter_size.w() = 1; padding = {0, 0, 0, 0}; } } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "22_quaternion_conv example\n\n" << " This example uses Ampere's Tensor Core operators on F16 data types to compute\n" << " forward convolution on tensors of layout NHWC.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --n=<int> Input tensor extent N\n" << " --h=<int> Input tensor extent H\n" << " --w=<int> Input tensor extent W\n" << " --c=<int> Input tensor extent C\n" << " --k=<int> Filter extent K\n" << " --r=<int> Filter extent R\n" << " --s=<int> Filter extent S\n\n" << " --alpha=<float> Epilogue scalar alpha\n" << " --beta=<float> Epilogue scalar beta\n\n" << " --ref-check If set (true), reference check on the host is computed\n" << " --perf-check If set (true), performance is measured.\n" << " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --save-workspace If set, workspace is written to a text file.\n" << " --tag=<string> String to replicate across the first column in the results table\n"; out << "\n\nExamples:\n\n" << "$ ./examples/22_quaternion_conv/22_quaternion_conv --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n" << "$ ./examples/22_quaternion_conv/22_quaternion_conv --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n"; return out; } /// Computes the output tensor size (NPQK) cutlass::Tensor4DCoord output_size() const { return cutlass::Tensor4DCoord( input_size.n(), (input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1, (input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1, filter_size.n()); } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of multiply-adds = NPQK * CRS int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c()) * 16; // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// struct Result { double runtime_ms; double gflops; cutlass::Status status; cutlass::Status reference_check; cudaError_t error; Result(): runtime_ms(0), gflops(0), status(cutlass::Status::kSuccess), reference_check(cutlass::Status::kInvalid), error(cudaSuccess) { } static std::ostream & print_header(std::ostream &out, Options const &options) { if (!options.tag.empty()) { out << "Name,"; } out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs"; return out; } std::ostream & print(std::ostream &out, int idx, Options const &options) { if (!options.tag.empty()) { out << options.tag << ","; } out << "conv_" << idx << "," << options.input_size.n() << "," << options.input_size.h() << "," << options.input_size.w() << "," << options.input_size.c() << "," << options.filter_size.n() << "," << options.filter_size.h() << "," << options.filter_size.w() << "," << runtime_ms << "," << gflops; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Runs one benchmark Result profile_convolution(Options const &options) { Result result; // // Allocate host-device tensors using the CUTLASS Utilities. 
// cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size()); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_c(options.output_size()); // // Initialize tensors // // Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, 7, -8, 0); // Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, 7, -8, 0); // Fill tensor C on host with zeros cutlass::reference::host::TensorFill( tensor_c.host_view()); // Fill tensor C for reference on host with zeros cutlass::reference::host::TensorFill( tensor_ref_c.host_view()); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_ref_c.sync_device(); // // Define arguments for CUTLASS Convolution // cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation; // Split K dimension into 1 partitions int split_k_slices = 1; // Construct Conv2dProblemSize with user defined output size cutlass::conv::Conv2dProblemSize problem_size( options.input_size, options.filter_size, options.padding, options.conv_stride, options.dilation, options.output_size(), mode, split_k_slices ); // Construct ImplicitGemm::Argument structure with conv2d // problem size, data pointers, and epilogue values typename ImplicitGemm::Arguments arguments{ problem_size, tensor_a.device_ref(), tensor_b.device_ref(), tensor_c.device_ref(), tensor_c.device_ref(), {options.alpha, options.beta}, }; // // Initialize CUTLASS Convolution // ImplicitGemm implicit_gemm_op; size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); result.status = implicit_gemm_op.can_implement(arguments); CUTLASS_CHECK(result.status); result.status = implicit_gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(result.status); // // Launch initialized CUTLASS kernel // result.status = implicit_gemm_op(); CUTLASS_CHECK(result.status); // // Optional reference check // if (options.reference_check) { std::cout << "Verification on host...\n"; // Compute with reference implementation cutlass::reference::host::Conv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator >( problem_size, tensor_a.host_ref(), tensor_b.host_ref(), tensor_c.host_ref(), tensor_ref_c.host_ref(), options.alpha, options.beta ); // Check if output from CUTLASS kernel and reference kernel are equal or not tensor_c.sync_host(); bool passed = cutlass::reference::host::TensorEquals( tensor_c.host_view(), tensor_ref_c.host_view()); if (!passed) { result.reference_check = cutlass::Status::kErrorInternal; std::cout << "ERROR - results miscompared.\n"; } else { result.reference_check = cutlass::Status::kSuccess; std::cout << "Passed.\n"; } } else { result.reference_check = cutlass::Status::kInvalid; } if (options.save_workspace) { std::stringstream ss; ss << "22_quaternion_conv_" << options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c() << "_" << options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c() << ".dat"; 
std::ofstream output_workspace(ss.str()); output_workspace << "Input = \n" << tensor_a.host_view() << "\n\n" << "Filters = \n" << tensor_b.host_view() << "\n\n"; if (options.reference_check) { output_workspace << "Reference = \n" << tensor_ref_c.host_view() << "\n\n"; } output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl; std::cout << "Results written to '" << ss.str() << "'." << std::endl; } // // Performance measurement // if (options.measure_performance) { cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of convolution operations. result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of implicit GEMM operations on the device for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = implicit_gemm_op(); CUTLASS_CHECK(result.status); } // Record an event when the convolutions have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Print average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } } return result; } ///////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.benchmark) { // Benchmark several layers int batch_sizes[] = {1, 32, 64, 128, 256, 512}; struct Benchmark { int h, w, c, k, r, s; } layers[] = { {56, 56, 64, 256, 1, 1}, {56, 56, 64, 64, 1, 1}, {56, 56, 64, 64, 3, 3}, {56, 56, 256, 64, 1, 1}, {56, 56, 256, 512, 1, 1}, {56, 56, 256, 128, 1, 1}, {28, 28, 128, 128, 3, 3}, {28, 28, 128, 512, 1, 1}, {28, 28, 512, 128, 1, 1}, {28, 28, 512, 1024, 1, 1}, {28, 28, 512, 256, 1, 1}, {14, 14, 256, 256, 3, 3}, {14, 14, 256, 1024, 1, 1}, {14, 14, 1024, 256, 1, 1}, {14, 14, 1024, 2048, 1, 1}, {14, 14, 1024, 512, 1, 1}, {7, 7, 512, 512, 3, 3}, }; Result::print_header(std::cout, options) << std::endl; int idx = 1; for (auto const &layer : layers) { for (auto N : batch_sizes) { options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c}); Result result = profile_convolution(options); result.print(std::cout, idx, options) << std::endl; } ++idx; } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." 
<< std::endl; return -1; } Result result = profile_convolution(options); Result::print_header(std::cout, options) << std::endl; result.print(std::cout, 1, options) << std::endl; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
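// ---------------------------------------------------------------------------
// Editorial note: gflops() above multiplies the NPQK * CRS multiply-add count
// by 16 because a single quaternion multiply-accumulate expands to 16 real
// multiply-adds (the Hamilton product touches every pair of components).
// The plain-C++ sketch below is an illustration added for this write-up; the
// struct and function names are invented and do not use the cutlass::Quaternion API.
namespace quaternion_conv_notes {

struct Quat { float w, x, y, z; };

// acc += a * b, written out as 16 real multiply-adds (Hamilton product).
inline void quaternion_fma(Quat &acc, Quat const &a, Quat const &b) {
  acc.w += a.w * b.w - a.x * b.x - a.y * b.y - a.z * b.z;
  acc.x += a.w * b.x + a.x * b.w + a.y * b.z - a.z * b.y;
  acc.y += a.w * b.y - a.x * b.z + a.y * b.w + a.z * b.x;
  acc.z += a.w * b.z + a.x * b.y - a.y * b.x + a.z * b.w;
}

} // namespace quaternion_conv_notes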
examples/22_quaternion_conv/quaternion_conv.cu/0
{ "file_path": "examples/22_quaternion_conv/quaternion_conv.cu", "repo_id": "examples", "token_count": 8258 }
5
/***************************************************************************************************
 * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

/*
This example shows how to compute conv2d gradient with respect to weight (wgrad). In wgrad, the K
dimension of the implicit GEMM, corresponding to the sequential reduction loop, is very large
(N * P * Q). Split-k with parallel reduction is highly effective for such cases. Given the
split_k_slices parameter, it partitions the K loop into split_k_slices chunks and computes partial
reductions in parallel across different blocks. After that, a parallel reduction kernel is launched
to accumulate partial reductions.

In practice, wgrad requires fp32 accumulation to avoid overflow. When the input is fp16, some care
is needed to correctly instantiate the GEMM template.
*/

#include <iostream>
#include <fstream>
#include <sstream>

#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_wgrad.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"

#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"

#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"

#include "helper.h"

// The code section below describes datatype for input, output tensors and computation between
// elements

// In Wgrad, fp32 accumulation is necessary in practice.
using ElementAccumulator = float;                  // Data type of accumulator
using ElementComputeEpilogue = float;              // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t;             // Data type of elements in input tensor
using ElementInputB = cutlass::half_t;             // Data type of elements in input tensor
using ElementOutput = cutlass::half_t;             // Data type of elements in output tensor

using ElementC = ElementOutput;
using ElementCompute = ElementComputeEpilogue;

using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;

// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;

// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;

// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;  // Threadblock tile shape

// This code section describes the tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;           // Warp tile shape

// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;     // TensorCore instruction shape

// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;

// Number of pipelines you want to use
constexpr int NumStages = 3;

// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;

// We need two epilogue functors - one for GEMM and another for the final reduction.
// The epilogue for GEMM is not used, but is needed to instantiate the CUTLASS kernel template.
// Note that, when the input is fp16 and accumulation is fp32, the output of GEMM needs to be fp32,
// the final reduction is done in fp32, and the reduction epilogue converts fp32 outputs to fp16.
// Therefore, the output type of the GEMM epilogue is ElementCompute, not ElementOutput.

// This code section describes the epilogue part of the kernel; we use the default value
using EpilogueOpGEMM = cutlass::epilogue::thread::LinearCombination<
    ElementCompute,                                     // Data type of output matrix.
    128 / cutlass::sizeof_bits<ElementCompute>::value,  // The number of elements per vectorized
                                                        // memory access. This becomes the vector width of
                                                        // math instructions in the epilogue too.
    ElementAccumulator,                                 // Data type of accumulator
    ElementComputeEpilogue>;                            // Data type for alpha/beta in linear combination

// The epilogue functor for reduction. This is the one that is actually used.
using EpilogueOpReduction = cutlass::epilogue::thread::LinearCombination<
    ElementOutput,                                      // Data type of output matrix.
    128 / cutlass::sizeof_bits<ElementOutput>::value,   // The number of elements per vectorized
                                                        // memory access. This becomes the vector width of
                                                        // math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; // Data type for alpha/beta in lin using Conv2dWgradKernel = typename cutlass::conv::kernel::DefaultConv2dWgrad< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementAccumulator, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOpGEMM, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm >::Kernel; using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dWgradKernel>; using EpilogueOutputOp = EpilogueOpReduction; /// Reduction kernel using ReductionOp = cutlass::reduction::thread::ReduceAdd< ElementAccumulator, typename EpilogueOutputOp::ElementAccumulator, EpilogueOutputOp::kCount >; using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK< cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>, EpilogueOutputOp, ReductionOp >; using ReductionDevice = cutlass::reduction::device::ReduceSplitK<ReductionKernel>; using ReductionStrideIndex = typename ReductionDevice::StrideIndex; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::Tensor4DCoord input_size; cutlass::Tensor4DCoord filter_size; cutlass::Tensor4DCoord padding; cutlass::MatrixCoord conv_stride; cutlass::MatrixCoord dilation; bool reference_check; bool measure_performance; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; int split_k_slices; bool benchmark; std::string tag; Options(): help(false), input_size(1, 32, 32, 32), filter_size(32, 3, 3, 32), padding(1, 1, 1, 1), conv_stride(1, 1), dilation(1, 1), reference_check(true), measure_performance(false), iterations(20), save_workspace(false), alpha(1), beta(0), split_k_slices(8), benchmark(false) { } // Verify the problem size is compatible with the CUTLASS Convolution implementation. bool valid() { // // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 8 elements. 
// int const kAlignment = 8; if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) { // misaligned tensors return false; } // Invalid padding if ((padding.h() != filter_size.h() / 2) || (padding.w() != filter_size.w() / 2)) { return false; } return true; } /// Updates input and filter sizes void update( cutlass::Tensor4DCoord input_size, cutlass::Tensor4DCoord filter_size, cutlass::MatrixCoord stride) { this->input_size = input_size; this->filter_size = filter_size; conv_stride = stride; padding.n() = filter_size.h() / 2; padding.h() = filter_size.h() / 2; padding.w() = filter_size.w() / 2; padding.c() = filter_size.w() / 2; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("ref-check")) { reference_check = true; } if (cmd.check_cmd_line_flag("perf-check")) { measure_performance = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } cmd.get_cmd_line_argument("n", input_size.n()); cmd.get_cmd_line_argument("h", input_size.h()); cmd.get_cmd_line_argument("w", input_size.w()); cmd.get_cmd_line_argument("c", input_size.c()); cmd.get_cmd_line_argument("k", filter_size.n()); cmd.get_cmd_line_argument("r", filter_size.h()); cmd.get_cmd_line_argument("s", filter_size.w()); filter_size.c() = input_size.c(); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("split-k-slices", split_k_slices); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); if (filter_size.h() == 3 && filter_size.w() == 3) { padding = {1, 1, 1, 1}; } else { filter_size.h() = 1; filter_size.w() = 1; padding = {0, 0, 0, 0}; } } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "30_wgrad_split_k example\n\n" << " This example shows how to compute conv2d gradient with respect to weight (wgrad).\n" << " In wgrad, the K dimension of impligit GEMM, corresponding to the sequential reduction loop, is very large (N * P * Q).\n" << " Split-k with parallel reduction is highly effective for such cases.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --n=<int> Input tensor extent N\n" << " --h=<int> Input tensor extent H\n" << " --w=<int> Input tensor extent W\n" << " --c=<int> Input tensor extent C\n" << " --k=<int> Filter extent K\n" << " --r=<int> Filter extent R\n" << " --s=<int> Filter extent S\n\n" << " --alpha=<float> Epilogue scalar alpha\n" << " --beta=<float> Epilogue scalar beta\n\n" << " --split-k-slices=<int> Split-k factor \n\n" << " --ref-check If set (true), reference check on the host is computed\n" << " --perf-check If set (true), performance is measured.\n" << " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --save-workspace If set, workspace is written to a text file.\n" << " --tag=<string> String to replicate across the first column in the results table\n"; out << "\n\nExamples:\n\n" << "$ ./examples/30_wgrad_split_k/30_wgrad_split_k --n=32 --h=224 --w=224 --c=128 --k=256 --r=3 --s=3 --split-k-slices=8\n\n"; return out; } /// Computes the output tensor size (NPQK) cutlass::Tensor4DCoord output_size() const { return cutlass::Tensor4DCoord(input_size.n(), (input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1, (input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1, filter_size.n()); } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of multiply-adds = NPQK * CRS int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c()); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// struct Result { double runtime_ms; double gflops; cutlass::Status status; cutlass::Status reference_check; cudaError_t error; Result(): runtime_ms(0), gflops(0), status(cutlass::Status::kSuccess), reference_check(cutlass::Status::kInvalid), error(cudaSuccess) { } static std::ostream & print_header(std::ostream &out, Options const &options) { if (!options.tag.empty()) { out << "Name,"; } out << "Layer,N,H,W,C,K,R,S,Stride_H,Stride_W,Runtime,GFLOPs"; return out; } std::ostream & print(std::ostream &out, int idx, Options const &options) { if (!options.tag.empty()) { out << options.tag << ","; } out << "conv_" << idx << "," << options.input_size.n() << "," << options.input_size.h() << "," << options.input_size.w() << "," << options.input_size.c() << "," << options.filter_size.n() << "," << options.filter_size.h() << "," << options.filter_size.w() << "," << options.conv_stride.row() << "," << options.conv_stride.column() << "," << runtime_ms << "," << gflops; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Runs one benchmark Result profile_convolution(Options const &options) { Result result; // // Allocate host-device tensors using the CUTLASS Utilities. // // Inputs are the output gradient and the original activation. 
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.output_size()); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.input_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.filter_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.filter_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.filter_size); // // Initialize tensors // // Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(7), ElementInputA(-8), 0); // Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(7), ElementInputB(-8), 0); // Fill tensor C, D on host with zeros cutlass::reference::host::TensorFill(tensor_c.host_view()); cutlass::reference::host::TensorFill(tensor_d.host_view()); // Fill tensor D for reference on host with zeros cutlass::reference::host::TensorFill(tensor_ref_d.host_view()); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // // Define arguments for CUTLASS Convolution // cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation; // Partition the GEMM K loop into split_k_slices chunks int split_k_slices = options.split_k_slices; // Construct Conv2dProblemSize with user defined output size // Do not forget to pass the last argument. cutlass::conv::Conv2dProblemSize problem_size( options.input_size, options.filter_size, options.padding, options.conv_stride, options.dilation, options.output_size(), mode, split_k_slices ); using cutlass::layout::TensorNHWC; cutlass::conv::SplitKMode const split_k_mode = cutlass::conv::SplitKMode::kParallel; // Since the epilogue is not computed after GEMM, there is no need to pass the C tensor and // alpha and beta can be set to 1 and 0 respectively. // Moreover, since the output will be written to the workspace, there is no need to pass // the D tensor as well. // Do not forget to pass the last argument. typename ImplicitGemm::Arguments arguments{ problem_size, tensor_a.device_ref(), tensor_b.device_ref(), {nullptr, TensorNHWC()}, {nullptr, TensorNHWC()}, {ElementCompute(1), ElementCompute(0)}, split_k_mode }; // // Initialize CUTLASS Convolution // ImplicitGemm implicit_gemm; size_t workspace_size = implicit_gemm.get_workspace_size(arguments); // Split-K requires non-zero workspace size. The workspace size grows linearly with split_k_slices. std::cout << "split-k-slices: " << split_k_slices << std::endl; std::cout << "workspace size: " << workspace_size << std::endl; // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); result.status = implicit_gemm.can_implement(arguments); CUTLASS_CHECK(result.status); // After the workspace is allocated, we point the GEMM destination pointer to the workspace. 
TensorNHWC layout_D{TensorNHWC::packed(options.filter_size)}; arguments.ref_D.reset(reinterpret_cast<ElementCompute*>(workspace.get()), layout_D); result.status = implicit_gemm.initialize(arguments, workspace.get()); CUTLASS_CHECK(result.status); // // Launch initialized CUTLASS kernel // result.status = implicit_gemm(); CUTLASS_CHECK(result.status); if (split_k_mode == cutlass::conv::SplitKMode::kParallel) { // Do reduction ReductionDevice reduction_op; auto& status = result.status; static cutlass::conv::Operator const kConvolutionalOperator = ImplicitGemm::kConvolutionalOperator; typename ReductionDevice::Arguments reduction_args( cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(), problem_size.split_k_slices, cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, problem_size), // Reduction input { reinterpret_cast<ElementAccumulator*> (workspace.get()), ReductionStrideIndex(tensor_c.stride()[ImplicitGemm::UnderlyingKernel::kTensorCStrideIdx]) }, // Destination { tensor_d.device_data(), ReductionStrideIndex(tensor_d.stride()[ImplicitGemm::UnderlyingKernel::kTensorCStrideIdx]) }, // Source { tensor_c.device_data(), ReductionStrideIndex(tensor_c.stride()[ImplicitGemm::UnderlyingKernel::kTensorCStrideIdx]) }, {options.alpha, options.beta} ); status = reduction_op.initialize(reduction_args, nullptr); status = reduction_op(); } // // Optional reference check // if (options.reference_check) { std::cout << "Verification on device...\n"; // Compute with reference implementation cutlass::reference::device::Conv2dWgrad< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator, cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue> >( problem_size, tensor_a.device_ref(), tensor_b.device_ref(), tensor_c.device_ref(), tensor_ref_d.device_ref(), options.alpha, options.beta ); // Check if output from CUTLASS kernel and reference kernel are equal or not tensor_c.sync_host(); tensor_d.sync_host(); tensor_ref_d.sync_host(); bool passed = cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()); if (!passed) { result.reference_check = cutlass::Status::kErrorInternal; std::cout << "ERROR - results miscompared.\n"; } else { result.reference_check = cutlass::Status::kSuccess; std::cout << "Passed.\n"; } } else { result.reference_check = cutlass::Status::kInvalid; } if (options.save_workspace) { std::stringstream ss; ss << "30_wgrad_split_k_" << options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c() << "_" << options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c() << ".dat"; std::ofstream output_workspace(ss.str()); output_workspace << "Input = \n" << tensor_a.host_view() << "\n\n" << "Filters = \n" << tensor_b.host_view() << "\n\n"; if (options.reference_check) { output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n"; } output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl; std::cout << "Results written to '" << ss.str() << "'." 
<< std::endl; } // // Performance measurement // if (options.measure_performance) { cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of convolution operations. result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of implicit GEMM operations on the device for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = implicit_gemm(); CUTLASS_CHECK(result.status); } // Record an event when the convolutions have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Print average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } } return result; } ///////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (!(props.major >= 8)) { std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80." 
<< std::endl; notSupported = true; } if (notSupported) { return 0; } Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.benchmark) { // Benchmark several layers int batch_sizes[] = {34, 408}; struct Benchmark { int h, w, c, k, r, s, stride_h, stride_w; } layers[] = { {56, 56, 64, 256, 1, 1, 1, 1}, {56, 56, 64, 64, 1, 1, 1, 1}, {56, 56, 64, 64, 3, 3, 1, 1}, {56, 56, 256, 64, 1, 1, 1, 1}, {56, 56, 256, 512, 1, 1, 2, 2}, {56, 56, 256, 128, 1, 1, 1, 1}, {56, 56, 128, 128, 3, 3, 2, 2}, {28, 28, 128, 512, 1, 1, 1, 1}, {28, 28, 512, 128, 1, 1, 1, 1}, {28, 28, 128, 128, 3, 3, 1, 1}, {28, 28, 512, 1024, 1, 1, 2, 2}, {28, 28, 512, 256, 1, 1, 1, 1}, {28, 28, 256, 256, 3, 3, 2, 2}, {14, 14, 256, 1024, 1, 1, 1, 1}, {14, 14, 1024, 256, 1, 1, 1, 1}, {14, 14, 256, 256, 3, 3, 1, 1}, {14, 14, 1024, 2048, 1, 1, 2, 2}, {14, 14, 1024, 512, 1, 1, 1, 1}, {14, 14, 512, 512, 3, 3, 2, 2}, { 7, 7, 512, 2048, 1, 1, 1, 1}, { 7, 7, 2048, 512, 1, 1, 1, 1}, { 7, 7, 512, 512, 3, 3, 1, 1}, }; Result::print_header(std::cout, options) << std::endl; int idx = 1; for (auto const &layer : layers) { for (auto N : batch_sizes) { options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c}, {layer.stride_h, layer.stride_w}); Result result = profile_convolution(options); result.print(std::cout, idx, options) << std::endl; } ++idx; } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } Result result = profile_convolution(options); Result::print_header(std::cout, options) << std::endl; result.print(std::cout, 1, options) << std::endl; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
examples/30_wgrad_split_k/30_wgrad_split_k.cu/0
{ "file_path": "examples/30_wgrad_split_k/30_wgrad_split_k.cu", "repo_id": "examples", "token_count": 10172 }
6
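The split-K branch in the example above writes one partial accumulator tile per slice into a workspace and then runs a separate ReductionDevice pass. The host-side sketch below restates that idea in plain C++ so the data flow is easier to follow; it illustrates the parallel split-K concept only, and none of the names belong to the CUTLASS API.

// Parallel split-K in miniature: partition the reduction (K) dimension into
// slices, let each slice produce a partial sum (its "workspace" entry), then
// reduce the partials and apply the alpha/beta epilogue against the source C.
// Illustrative only -- not the CUTLASS kernels used in the example.
#include <cstddef>
#include <cstdio>
#include <vector>

float split_k_dot(const std::vector<float>& a, const std::vector<float>& b,
                  float c, float alpha, float beta, int slices) {
  const std::size_t K = a.size();
  std::vector<float> workspace(slices, 0.0f);

  // Each slice (a threadblock in the real kernel) reduces only its K range.
  for (int s = 0; s < slices; ++s) {
    const std::size_t begin = s * K / slices;
    const std::size_t end = (s + 1) * K / slices;
    for (std::size_t k = begin; k < end; ++k) {
      workspace[s] += a[k] * b[k];
    }
  }

  // Separate reduction step (ReductionDevice in the example): combine the
  // partials, then apply the linear-combination epilogue.
  float acc = 0.0f;
  for (float p : workspace) acc += p;
  return alpha * acc + beta * c;
}

int main() {
  std::vector<float> a(1024, 1.0f), b(1024, 0.5f);
  std::printf("%f\n", split_k_dot(a, b, /*c=*/2.0f, 1.0f, 1.0f, /*slices=*/5));
}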
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a double-buffered threadblock-scoped GEMM kernel. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/threadblock/mma_base.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class CustomMmaBase { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Policy describing tuning details using Policy = Policy_; // // Dependent types // /// Warp-level Mma using Operator = typename Policy::Operator; /// Shape describing the overall GEMM computed from shared memory /// by each warp. 
using WarpGemm = typename Policy::Operator::Shape; /// Shape describing the number of warps filling the CTA using WarpCount = GemmShape< Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>; /// Number of warp-level GEMM oeprations static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK); /// Number of stages static int const kStages = Stages; // // Nested structs // /// Shared storage object needed by threadblock-scoped GEMM template <typename Element, typename OperandShape, typename OperandLayout> struct OperandSharedStorage { AlignedBuffer<Element, OperandShape::kCount> buffer; using TensorRef = TensorRef<Element, OperandLayout>; CUTLASS_DEVICE static OperandLayout Layout() { return OperandLayout::packed({OperandShape::kRow, OperandShape::kColumn}); } /// Returns a TensorRef to the operand CUTLASS_HOST_DEVICE TensorRef ref() { return TensorRef{buffer.data(), Layout()}; } }; /// Shape of the A matrix operand in shared memory using ShapeA = MatrixShape< Shape::kM + Policy::SmemPaddingA::kRow, Shape::kK * kStages + Policy::SmemPaddingA::kColumn>; /// Shape of the B matrix operand in shared memory using ShapeB = MatrixShape< Shape::kK * kStages + Policy::SmemPaddingB::kRow, Shape::kN + Policy::SmemPaddingB::kColumn>; using SharedStorageA = OperandSharedStorage< typename Operator::ElementA, ShapeA, typename Operator::LayoutA>; using SharedStorageB = OperandSharedStorage< typename Operator::ElementB, ShapeB, typename Operator::LayoutB>; using TensorRefA = typename SharedStorageA::TensorRef; using TensorRefB = typename SharedStorageB::TensorRef; struct SharedStorage { /// Buffer for A operand SharedStorageA operand_A; /// Buffer for B operand SharedStorageB operand_B; }; protected: // // Data members // /// Iterator to load a warp-scoped tile of A operand from shared memory typename Operator::IteratorA warp_tile_iterator_A_; /// Iterator to load a warp-scoped tile of B operand from shared memory typename Operator::IteratorB warp_tile_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE CustomMmaBase( ///< Shared storage needed for internal use by threadblock-scoped GEMM SharedStorageA& shared_storageA, SharedStorageB& shared_storageB, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : warp_tile_iterator_A_(shared_storageA.ref(), lane_idx), warp_tile_iterator_B_(shared_storageB.ref(), lane_idx) {} }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
examples/41_fused_multi_head_attention/gemm/custom_mma_base.h/0
{ "file_path": "examples/41_fused_multi_head_attention/gemm/custom_mma_base.h", "repo_id": "examples", "token_count": 1908 }
7
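CustomMmaBase above derives its warp count, warp-level iteration count, and shared-memory tile shapes entirely from compile-time shapes. The constexpr sketch below reproduces that arithmetic for one plausible configuration so the relationships are visible at a glance; the 128x128x32 threadblock, 64x64x32 warp tile, 16-wide MMA K, and 3 stages are assumed values chosen for illustration, not taken from this header.

// Compile-time arithmetic mirroring CustomMmaBase: WarpCount = Shape / WarpGemm,
// kWarpGemmIterations = WarpGemm::kK / MmaShape::kK, and the A operand smem
// tile holds M x (K * stages) elements (padding ignored here).
#include <cstdio>

struct ThreadblockShape { static constexpr int kM = 128, kN = 128, kK = 32; };
struct WarpGemmShape    { static constexpr int kM = 64,  kN = 64,  kK = 32; };
struct MmaInstrShape    { static constexpr int kK = 16; };
constexpr int kStages = 3;

constexpr int kWarpCountM = ThreadblockShape::kM / WarpGemmShape::kM;       // 2
constexpr int kWarpCountN = ThreadblockShape::kN / WarpGemmShape::kN;       // 2
constexpr int kWarpCountK = ThreadblockShape::kK / WarpGemmShape::kK;       // 1
constexpr int kWarpGemmIterations = WarpGemmShape::kK / MmaInstrShape::kK;  // 2
constexpr int kSmemElementsA = ThreadblockShape::kM * ThreadblockShape::kK * kStages;

int main() {
  std::printf("warps %dx%dx%d, warp-gemm iterations %d, smem A elements %d\n",
              kWarpCountM, kWarpCountN, kWarpCountK, kWarpGemmIterations,
              kSmemElementsA);
}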
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# from typing import List import torch import subprocess import sys import tempfile import os import numpy as np TORCH_DTYPE_NAME = { torch.float32: "f32", torch.float16: "f16", torch.bfloat16: "b16" } NAME_TORCH_DTYPE = {v: k for k, v in TORCH_DTYPE_NAME.items()} def _tensor_from_storage(tensor: torch.Tensor, dtype) -> torch.Tensor: # PyTorch >= 2.0 if hasattr(tensor, 'untyped_storage'): return torch.tensor([], dtype=dtype).set_(tensor.untyped_storage()) return torch.tensor([], dtype=dtype).set_(tensor.storage().untyped()) class PipedSubprocess: def __init__(self, binary: str) -> None: self.binary = binary self.tempdir_ctx = tempfile.TemporaryDirectory() def __enter__(self) -> "PipedSubprocess": self.subp = subprocess.Popen(self.binary, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=sys.stderr, text=True, bufsize=0) self.tempdir = self.tempdir_ctx.__enter__() self.file_counter = 0 return self def __exit__(self, exc_type, exc_val, exc_tb) -> None: self.tempdir_ctx.__exit__(exc_type, exc_val, exc_tb) def temp_filename(self, suffix: str) -> str: self.file_counter += 1 return os.path.join(self.tempdir, f"{self.file_counter}{suffix}") def write(self, *args) -> None: for a in args: self.subp.stdin.write(str(a) + " ") def writeTensor(self, tensor: torch.Tensor, name: str, stride_names: List[str]) -> None: print(f"Py ->C++: {TORCH_DTYPE_NAME[tensor.dtype]}:{name}") tensor_u8 = _tensor_from_storage(tensor, torch.uint8) self.write("tensor_begin", f"{TORCH_DTYPE_NAME[tensor.dtype]}:{name}", tensor_u8.shape[0]) filename = self.temp_filename(f"{name}.tensor") assert tensor.storage_offset() == 0 with open(filename, "wb+") as fd: fd.write(bytes(tensor_u8.numpy())) self.write("file", filename) self.write("tensor_end") for stride_name, 
stride_value in zip(stride_names, tensor.stride()): self.write(stride_name, stride_value) def readTensor(self, name, stride_name, shape) -> torch.Tensor: tmpfile = self.temp_filename(f"{name}.tensor") self.write("tmpfile", tmpfile) self.readExpect("tensor_begin") dtype_str, name = self.read().split(":") print(f"C++->Py : {dtype_str}:{name}") u8len = int(self.read()) dtype = NAME_TORCH_DTYPE[dtype_str] self.readExpect("file") self.readExpect(tmpfile) with open(tmpfile, "rb") as fd: data = fd.read(u8len) # `np.array` is not strictly needed, but avoids a torch warning tensor_u8 = torch.frombuffer(np.array(data), dtype=torch.uint8, count=u8len) self.readExpect("tensor_end") tensor = _tensor_from_storage(tensor_u8, dtype) strides = [] for sn in stride_name: self.readExpect(sn) strides.append(int(self.read())) if len(strides) != shape: strides.append(1) assert len(strides) == len(shape), name return torch.as_strided(tensor, shape, strides) def readNamed(self, name: str): self.readExpect(name) return self.read() def readExpect(self, what: str) -> None: r = self.read() if r != what: raise ValueError(f"Read {r} but expected {what}") def read(self): read_all = [] # Skip initial whitespace while True: r = self.subp.stdout.read(1) if r not in [' ', "\n"]: read_all.append(r) break # Read data while True: r = self.subp.stdout.read(1) if r in [' ', "\n"]: break read_all.append(r) return ''.join(read_all)
examples/41_fused_multi_head_attention/piped_subprocess.py/0
{ "file_path": "examples/41_fused_multi_head_attention/piped_subprocess.py", "repo_id": "examples", "token_count": 2303 }
8
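PipedSubprocess above speaks a small whitespace-delimited token protocol over stdin/stdout and moves raw tensor bytes through temporary files. The fragment below sketches what the receiving side of writeTensor could look like in C++; it is a hypothetical reader written to match the tokens the Python class emits, not the actual example binary shipped with 41_fused_multi_head_attention.

// Hypothetical C++ reader for the "tensor_begin ... tensor_end" sequence that
// PipedSubprocess.writeTensor emits: dtype:name, byte count, "file", a path to
// the temporary file holding the raw bytes, then the closing token.
#include <cstdint>
#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

static std::string read_token() {
  std::string t;
  std::cin >> t;
  return t;
}

std::vector<uint8_t> read_tensor_bytes(std::string& dtype_and_name) {
  if (read_token() != "tensor_begin") throw std::runtime_error("bad header");
  dtype_and_name = read_token();                     // e.g. "f16:query"
  const std::size_t num_bytes = std::stoull(read_token());
  if (read_token() != "file") throw std::runtime_error("expected 'file'");
  std::ifstream fd(read_token(), std::ios::binary);  // temp-file path
  std::vector<uint8_t> bytes(num_bytes);
  fd.read(reinterpret_cast<char*>(bytes.data()),
          static_cast<std::streamsize>(num_bytes));
  if (read_token() != "tensor_end") throw std::runtime_error("bad footer");
  return bytes;  // stride name/value token pairs follow in the real protocol
}

int main() {
  std::string dtype_and_name;
  std::vector<uint8_t> bytes = read_tensor_bytes(dtype_and_name);
  std::cout << "read " << bytes.size() << " bytes for " << dtype_and_name << "\n";
}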
<jupyter_start><jupyter_text>Basic example of using the CUTLASS Python interface for Conv2dThis notebook walks through a basic example of using the CUTLASS Python interface to declare, compile, and run Conv2d. [](https://colab.research.google.com/github/NVIDIA/cutlass/blob/main/examples/python/03_basic_conv2d.ipynb) Prerequisites for running on ColabThis notebook requires an NVIDIA GPU. If `nvidia-smi` fails, go to Runtime -> Change runtime type -> Hardware accelerator and confirm a GPU is selected.<jupyter_code>!#nvidia-smi<jupyter_output><empty_output><jupyter_text>If running on Colab, you will need to install the CUTLASS Python interface. To do so, uncomment the following line and run the cell:<jupyter_code>!#pip install nvidia-cutlass<jupyter_output><empty_output><jupyter_text>General setupWe first import various packages needed for the example and construct the input and output tensors that will be used in our example.<jupyter_code>import torch import random import cutlass # This controls whether the C++ GEMM declaration will be printed at each step. # Set to `false` to omit this information. print_module = True # Input tensor: [N, H, W, C] under the channel-last layout N, H, W, C = [32, 28, 28, 64] # Weight tensor: [K, R, S, C] under the channel-last layout K, R, S = [128, 3, 3] # Stride, and padding stride = (2, 2) padding = (1, 1) dilation = (1, 1) # Compute the output size [N, P, Q, K] N, P, Q, K = cutlass.Conv2d.output_size((N, H, W, C), (K, R, S, C), padding, stride, dilation) dtype = torch.float16 type_A = torch.float16 type_B = torch.float16 type_C = torch.float16 type_D = torch.float16 torch.manual_seed(1234) input = torch.ceil( torch.empty(size=(N, C, H, W), dtype=type_A, device="cuda").uniform_(-4.5, 3.5) ).to(memory_format=torch.channels_last) weight = torch.ceil( torch.empty(size=(K, C, R, S), dtype=type_B, device="cuda").uniform_(-4.5, 3.5) ).to(memory_format=torch.channels_last) tensor_C = torch.ceil( torch.empty(size=(N, K, P, Q), dtype=type_B, device="cuda").uniform_(-4.5, 3.5) ).to(memory_format=torch.channels_last) output = torch.zeros_like(tensor_C) alpha = 1.0 beta = 0.0<jupyter_output><empty_output><jupyter_text>Declaring and running a Conv2d FpropWe first show you how to run a Conv2d in the forward propagation. To get started, one only needs to provide the tensors declared above to the `cutlass.op.Conv2dFprop` call. This sets up a default Conv2d fprop operation for the given device on which you are running. Assuming that we are runing on SM80, the default is a Conv2d that leverages FP16 Tensor Core operations.Calling `plan.run()` will generate the CUTLASS C++ kernel in question, compile it, and run it on the tensors we previously passed in. By setting `print_module` to `true`, the C++ code that is emitted is printed.<jupyter_code># Specifying `element_accumulator` is not required if it is the same as `element` plan = cutlass.Conv2dFprop(element=dtype, element_accumulator=torch.float32) plan.run(input, weight, tensor_C, output, stride, padding, dilation, alpha, beta, print_module=print_module)<jupyter_output><empty_output><jupyter_text>There are many other ways to construct a plan from `cutlass.op.Conv2dFprop` (e.g., by specifying the types of each operand, by providing representative tensors as input). For more details on these, see the documentation in the `cutlass.op.Conv2dFprop` constructor.We then compare the output to running the Conv2d using PyTorch. 
PyTorch use NCHW layout by default, so permutations are required.<jupyter_code>output_torch = alpha * torch.ops.aten.conv2d( input, weight, stride=stride, padding=padding, dilation=dilation ) + beta * tensor_C assert torch.equal(output_torch, output)<jupyter_output><empty_output><jupyter_text>Note that one could use the same kernel just declared for tensors provided by other frameworks beyond PyTorch, such as NumPy. Declaring and running Conv2d Dgrad and WgradThe Python interface also supports declaring and running backward kernels of Conv2d. To begin with, we construct the tensors for the gradient of input, output, and weight.<jupyter_code>grad_output = torch.ceil( torch.empty(size=(N, K, P, Q), dtype=type_A, device="cuda").uniform_(-4.5, 3.5) ).to(memory_format=torch.channels_last) grad_input = torch.zeros_like(input) grad_weight = torch.zeros_like(weight) tensor_C_dgrad = torch.ceil( torch.empty(size=(N, C, H, W), dtype=type_A, device="cuda").uniform_(-4.5, 3.5) ).to(memory_format=torch.channels_last) tensor_C_wgrad = torch.ceil( torch.empty(size=(K, C, R, S), dtype=type_B, device="cuda").uniform_(-4.5, 3.5) ).to(memory_format=torch.channels_last)<jupyter_output><empty_output><jupyter_text>The script below gives a simple example of computing a data gradient via the CUTLASS Python interface and via PyTorch.<jupyter_code>plan_dgrad = cutlass.Conv2dDgrad(element=dtype, element_accumulator=torch.float32) plan_dgrad.run(grad_output, weight, tensor_C_dgrad, grad_input, stride, padding, dilation, alpha, beta, print_module=print_module) grad_input_torch = alpha * torch.nn.grad.conv2d_input( (N, C, H, W), weight, grad_output, stride=stride, padding=padding ) + beta * tensor_C_dgrad assert torch.equal(grad_input_torch, grad_input)<jupyter_output><empty_output><jupyter_text>The script below gives a simple example of computing a weight gradient via the CUTLASS Python interface and via PyTorch.<jupyter_code>plan_wgrad = cutlass.Conv2dWgrad(element=dtype, element_accumulator=torch.float32) plan_wgrad.run(grad_output, input, tensor_C_wgrad, grad_weight, stride, padding, dilation, alpha, beta, print_module=print_module) grad_weight_torch = alpha * torch.nn.grad.conv2d_weight( input, (K, C, R, S), grad_output, stride=stride, padding=padding ) + beta * tensor_C_wgrad assert torch.equal(grad_weight_torch, grad_weight)<jupyter_output><empty_output><jupyter_text>Running non-default Conv2dsThe previous examples showed how it is simple to get starting running a default Conv2d kernel in CUTLASS. But, what do you do if you want a bit more control over the parameters to the Conv2d? CUTLASS Python interface exposes mutable parameters that can be set after the `plan` initialization. We summarize these in the table below.|Parameter|Description|| -- | -- ||`tile_description`|The threadblock tile size, warp count, software pipeline stages, and instruction shape||`iterator_algorithm`|The iterator algorithm used to access the source operands||`swizzling_stride`|The stride of the threadblock swizzling functor||`split-K`|Partitions the reduction dimension to different threadblocks| Tile DescriptionThe `tile_description` defines the tiling size of each threadblock, the warp count along each dimension of the tile, the software pipeline stages, and the instruction size. Under the hood, CUTLASS enumerates the different Conv2d configuration parameters for this kernel from the CUTLASS profiler. 
The code below shows how one can access the tile descriptions for the kernel (e.g., threadblock and warp shape).<jupyter_code>plan.opclass = "tensor_op" tiles = plan.tile_descriptions() print(f'{len(tiles)} tile descriptions returned') num_print = 10 print(f'First {num_print} tile descriptions are:') for td in tiles[:num_print]: print(td)<jupyter_output><empty_output><jupyter_text>Next, we'll pick one of these configurations at random and compile and run it.<jupyter_code>random.seed(42) idx = random.randint(0, len(tiles)-1) td = tiles[idx] print(f'Tile description {idx} is: {td}') plan.tile_description = td plan.run(input, weight, tensor_C, output, stride, padding, dilation, alpha, beta, print_module=print_module) assert torch.equal(output_torch, output)<jupyter_output><empty_output><jupyter_text>Besides tile descriptions enumerated by CUTLASS, the users can also explicitly set the `threadblockshape`, `warp_shape`, `stages`, `instruction_shape`, and `cluster_shape`. If the configuration is invalid, an exception will be raised at `plan.run()` and the detailed compilation error will be stored in `./cutlass_python_compilation_error.txt` for debugging.<jupyter_code>if plan.cc == 70: plan.tile_description = { "threadblock_shape": [64, 256, 32], "warp_count": [1, 4, 1], "stages": 2, "instruction_shape": [8, 8, 4], # optional, "cluster_shape": [1, 1, 1] # optional, only [1, 1, 1] is supported currently } elif plan.cc == 75: plan.tile_description = { "threadblock_shape": [128, 64, 32], "warp_count": [2, 1, 1], "stages": 2, "instruction_shape": [16, 8, 8], # optional, "cluster_shape": [1, 1, 1] # optional, only [1, 1, 1] is supported currently } elif plan.cc == 80: plan.tile_description = { "threadblock_shape": [128, 128, 64], "warp_count": [2, 2, 1], "stages": 4, "instruction_shape": [16, 8, 16], # optional, "cluster_shape": [1, 1, 1] # optional, only [1, 1, 1] is supported currently } elif plan.cc == 86: plan.tile_description = { "threadblock_shape": [128, 64, 64], "warp_count": [2, 2, 1], "stages": 3, "instruction_shape": [16, 8, 16], "cluster_shape": [1, 1, 1] } plan.run(input, weight, tensor_C, output, stride, padding, dilation, alpha, beta, print_module=print_module) assert torch.equal(output_torch, output)<jupyter_output><empty_output><jupyter_text>Iterator AlgorithmThe iterator algorithm describes how sources are loaded from memory. There are some iterator algorithms optimized for specific alignments and input/output channels that have better performance. The table below illustrates the available iterator algorithms.|Conv Kind | Iterator Algorithm | Description || -- | -- | -- ||Fprop | "analytic" | Functionally correct in all cases but lower performance || | "optimized" | Optimized for and requires `R <= 32`, `S<= 32`, and `C % alignment_input == 0`|| | "few_channels" | optimized for small `C` and requires `C % alignment_input == 0`|| | "fixed_channels" | optimized for small `C` and requires `C == alignment_input` ||Dgrad | "analytic" | Functionally correct in all cases but lower performance || | "optimized" | Optimzed for and require `R <= 32`, `S<= 32`, `K % alignment_grad_output == 0`, and `C % alignment_weight == 0`||Wgrad | "analytic" | Functionally correct in all cases but lower performance || | "optimized" | Optimized for and require `K % alignment_grad_output == 0`, and `C % alignment_input == 0`|By default, the Python interface will automatically propose a suitable iterator algorithm based on the input tensors in `plan.run()`. 
However, the user can also specify the desired iterator algorithm as follows<jupyter_code>plan.iterator_algorithm = "analytic" plan.run(input, weight, tensor_C, output, stride, padding, dilation, alpha, beta, print_module=print_module) assert torch.equal(output_torch, output)<jupyter_output><empty_output><jupyter_text>If the iterator algorithm is invalid for the problem size in `plan.run()`, an exception will be raised. Swizzling StrideThe swizzling changes how the tile are mapped to threadblocks to improve the L2 Locality. Given a swizzling stride `N`, the threadblock `(tb_x, tb_y)` computes tile `(tb_x / N, tb_y * N + (tb_x % N))`. Currently, stride values of `1`, `2`, `4`, and `8` are supported for `fprop`, `wgrad`, and `1`, and `4` for `dgrad`. The swizzling stride can be set with:<jupyter_code>plan.swizzling_stride = 4 plan.run(input, weight, tensor_C, output, stride, padding, dilation, alpha, beta, print_module=print_module) assert torch.equal(output_torch, output)<jupyter_output><empty_output><jupyter_text>Split-KSplit-K is usually applied when the Conv2d has small spatial dimensions and large reduction dimension to ensure good utilization. It further partitions the reduction dimension to different threadblocks. The CUTLASS Python interface supports two types of split-K strategies: `Parallel`, and `Serial`. * `Parallel`: the partial results from different threadblocks are stored in a temporary buffer in the global memory. When the Conv2d is done, a separate reduction kernel is created and launched to reduce the partial results.* `Serial`: A semaphore is used to coordinate the order of different threadblocks adding their partial results to a given output tile. A separate kernel does not need to be launched for prforming the reduction.While all `fprop`, `dgrad`, and `wgrad` support split-K, here we use `wgrad` as an example.<jupyter_code># Parallel Split-K with 5 slices grad_weight_parallel = torch.zeros_like(grad_weight) plan_wgrad.run( grad_output, input, tensor_C_wgrad, grad_weight_parallel, stride, padding, dilation, alpha, beta, print_module=print_module, split_k=("parallel", 5)) assert torch.equal(grad_weight_torch, grad_weight_parallel) # Serial Split-K with 3 slices grad_weight_serial = torch.zeros_like(grad_weight) plan_wgrad.run( grad_output, input, tensor_C_wgrad, grad_weight_serial, stride, padding, dilation, alpha, beta, print_module=print_module, split_k=("serial", 3)) assert torch.equal(grad_weight_torch, grad_weight_serial)<jupyter_output><empty_output>
examples/python/03_basic_conv2d.ipynb/0
{ "file_path": "examples/python/03_basic_conv2d.ipynb", "repo_id": "examples", "token_count": 4484 }
9
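Among the tuning knobs the notebook walks through, the swizzling stride has the simplest closed form: with stride N, threadblock (tb_x, tb_y) works on tile (tb_x / N, tb_y * N + tb_x % N). The snippet below merely evaluates that mapping so the L2-locality pattern can be printed and inspected; it restates the notebook's formula rather than invoking the actual CUTLASS swizzle functor.

// Evaluate the threadblock-swizzle mapping described in the notebook:
// (tb_x, tb_y) -> (tb_x / N, tb_y * N + tb_x % N) for swizzling stride N.
#include <cstdio>

struct TileCoord { int x, y; };

TileCoord swizzled_tile(int tb_x, int tb_y, int n) {
  return {tb_x / n, tb_y * n + (tb_x % n)};
}

int main() {
  const int n = 4;  // swizzling stride
  for (int tb_x = 0; tb_x < 8; ++tb_x) {
    for (int tb_y = 0; tb_y < 2; ++tb_y) {
      TileCoord t = swizzled_tile(tb_x, tb_y, n);
      std::printf("threadblock (%d,%d) -> tile (%d,%d)\n", tb_x, tb_y, t.x, t.y);
    }
  }
}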
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/arch/util.hpp> #include <cute/numeric/numeric_types.hpp> namespace cute { // // Direct Copy for any type // template <class S, class D = S> struct UniversalCopy { using SRegisters = S[1]; using DRegisters = D[1]; template <class S_, class D_> CUTE_HOST_DEVICE static constexpr void copy(S_ const& src, D_ & dst) { dst = static_cast<D>(static_cast<S>(src)); } // Accept mutable temporaries template <class S_, class D_> CUTE_HOST_DEVICE static constexpr void copy(S_ const& src, D_ && dst) { UniversalCopy<S,D>::copy(src, dst); } }; // // Placeholder for the copy algorithm's stronger auto-vectorizing behavior // that assumes alignment of pointers and dynamic layouts up to MaxVecBits // template <int MaxVecBits = 128> struct AutoVectorizingCopyWithAssumedAlignment : UniversalCopy<uint_bit_t<MaxVecBits>> { static_assert(MaxVecBits == 8 || MaxVecBits == 16 || MaxVecBits == 32 || MaxVecBits == 64 || MaxVecBits == 128, "Expected MaxVecBits to be 8 or 16 or 32 or 64 or 128 for alignment and performance."); }; // // AutoVectorizingCopy alias assumes maximal alignment of pointers and dynamic strides. // If this is not the case then AutoVectorizingCopyWithAssumedAlignment should be used instead // using AutoVectorizingCopy = AutoVectorizingCopyWithAssumedAlignment<128>; // // DefaultCopy alias does not assume alignment of pointers or dynamic strides. // using DefaultCopy = AutoVectorizingCopyWithAssumedAlignment<8>; // // Global memory prefetch into L2 // CUTE_HOST_DEVICE static void prefetch(void const* gmem_ptr) { #if defined(__CUDA_ARCH__) asm volatile("prefetch.global.L2 [%0];\n" : : "l"(gmem_ptr) : "memory"); #endif } } // end namespace cute
include/cute/arch/copy.hpp/0
{ "file_path": "include/cute/arch/copy.hpp", "repo_id": "include", "token_count": 1110 }
10
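The copy operations declared in copy.hpp are normally consumed by passing them to cute::copy as the policy argument, which lets CuTe auto-vectorize under the stated alignment assumption. The kernel below is a minimal sketch of that usage under assumed 128-bit alignment of both pointers; the 8x16 float tile, the row-major strides, and the single-thread launch are arbitrary illustration choices, not requirements of the header.

// Sketch: cute::copy with AutoVectorizingCopyWithAssumedAlignment<128> as the
// copy policy. With 128-bit alignment assumed, CuTe may vectorize the float
// loads/stores. Shapes and launch configuration are illustrative only.
#include <cute/tensor.hpp>

__global__ void copy_tile(float const* src_ptr, float* dst_ptr) {
  using namespace cute;
  // 8x16 tile, row-major strides (16,1), built explicitly for clarity.
  auto layout = make_layout(make_shape(Int<8>{}, Int<16>{}),
                            make_stride(Int<16>{}, Int<1>{}));
  Tensor src = make_tensor(make_gmem_ptr(src_ptr), layout);
  Tensor dst = make_tensor(make_gmem_ptr(dst_ptr), layout);
  copy(AutoVectorizingCopyWithAssumedAlignment<128>{}, src, dst);
}

// Host side (illustrative): copy_tile<<<1, 1>>>(d_src, d_dst);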
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/arch/copy.hpp> #include <cute/atom/copy_traits.hpp> #include <cute/atom/mma_atom.hpp> #include <cute/util/type_traits.hpp> #include <cute/tensor_impl.hpp> namespace cute { template <class... Args> struct Copy_Atom; template <class CopyOperation, class CopyInternalType> struct Copy_Atom<CopyOperation, CopyInternalType> : Copy_Atom<Copy_Traits<CopyOperation>, CopyInternalType> {}; template <class... Args, class CopyInternalType> struct Copy_Atom<Copy_Traits<Args...>, CopyInternalType> : Copy_Traits<Args...> { using Traits = Copy_Traits<Args...>; // Bit and Thr layouts from the Copy_Traits using ThrID = typename Traits::ThrID; using BitLayoutSrc = typename Traits::SrcLayout; using BitLayoutDst = typename Traits::DstLayout; using BitLayoutRef = typename Traits::RefLayout; using ValType = CopyInternalType; using ValLayoutSrc = decltype(recast_layout<uint1_t, ValType>(BitLayoutSrc{})); using ValLayoutDst = decltype(recast_layout<uint1_t, ValType>(BitLayoutDst{})); using ValLayoutRef = decltype(recast_layout<uint1_t, ValType>(BitLayoutRef{})); CUTE_STATIC_ASSERT_V(size<0>(ValLayoutSrc{}) == size(ThrID{}), "CopyOperation is not valid for Src of ValType."); CUTE_STATIC_ASSERT_V(size<0>(ValLayoutDst{}) == size(ThrID{}), "CopyOperation is not valid for Dst of ValType."); CUTE_STATIC_ASSERT_V(size<0>(ValLayoutRef{}) == size(ThrID{}), "CopyOperation is not valid for Ref of ValType."); static constexpr int NumValSrc = size<1>(ValLayoutSrc{}); static constexpr int NumValDst = size<1>(ValLayoutDst{}); // Additional Trait parameters/transformations template <class... TraitsArgs> CUTE_HOST_DEVICE auto with(TraitsArgs&&... 
args) const { auto traits = Traits::with(static_cast<TraitsArgs&&>(args)...); return Copy_Atom<decltype(traits), CopyInternalType>{traits}; } // // Tensor call interfaces // // Check and call instruction, or recurse template <class SEngine, class SLayout, class DEngine, class DLayout> CUTE_HOST_DEVICE void call(Tensor<SEngine,SLayout> const& src, Tensor<DEngine,DLayout> & dst) const { static_assert(SLayout::rank == 1, "Expected rank-1 src tensor"); static_assert(DLayout::rank == 1, "Expected rank-1 dst tensor"); if constexpr (is_constant<NumValSrc, decltype(size(src))>::value || is_constant<NumValDst, decltype(size(dst))>::value) { // Dispatch to unpack to execute instruction return copy_unpack(*this, src, dst); } else if constexpr (is_tuple<decltype(shape(src))>::value && is_tuple<decltype(shape(dst))>::value) { // If the size of the src/dst doesn't match the instruction, // recurse this rank-1 layout by peeling off the mode // ((A,B,C,...)) -> (A,B,C,...) return copy(*this, tensor<0>(src), tensor<0>(dst)); } else { static_assert(dependent_false<SEngine>, "No instruction match and no recursion possible."); } } // Accept mutable temporaries template <class SEngine, class SLayout, class DEngine, class DLayout> CUTE_HOST_DEVICE void call(Tensor<SEngine,SLayout> const& src, Tensor<DEngine,DLayout> && dst) const { return call(src, dst); } }; // // A tiling of copy atoms // template <class TiledCopy, class ThrIdx> struct ThrCopy; template <class Copy_Atom, class LayoutCopy_TV, // (tid,vid) -> coord [Need not be 2D...] class ShapeTiler_MN> // coord space struct TiledCopy : Copy_Atom { // Layout information from the CopyAtom using AtomThrID = typename Copy_Atom::ThrID; // thrid -> thr_idx using AtomLayoutSrc = typename Copy_Atom::ValLayoutSrc; // (thr,val) -> offset using AtomLayoutDst = typename Copy_Atom::ValLayoutDst; // (thr,val) -> offset using AtomLayoutRef = typename Copy_Atom::ValLayoutRef; // (thr,val) -> offset using AtomNumThr = decltype(size<0>(AtomLayoutRef{})); using AtomNumVal = decltype(size<1>(AtomLayoutRef{})); // Layout information for the TiledCopy using Tiler_MN = ShapeTiler_MN; using TiledLayout_TV = LayoutCopy_TV; using TiledNumThr = decltype(size<0>(TiledLayout_TV{})); using TiledNumVal = decltype(size<1>(TiledLayout_TV{})); CUTE_STATIC_ASSERT_V(TiledNumThr{} % AtomNumThr{} == Int<0>{}, "TiledCopy uses too few thrs for selected CopyAtom"); CUTE_STATIC_ASSERT_V(TiledNumVal{} % AtomNumVal{} == Int<0>{}, "TiledCopy uses too few vals for selected CopyAtom"); // Tile a tensor or a layout from shape // (M,N,...) // to shape // ((ThrV,ThrX),FrgV,(RestM,RestN,...)) // where // ThrV: The threads local to a COPY_ATOM Src. // ThrX: The threads tiled across COPY_ATOMs Src. // FrgV: The values local to a COPY_ATOM Src. // RestM: The values tiled in M. // RestN: The values tiled in N. template <class STensor> CUTE_HOST_DEVICE constexpr static auto tidfrg_S(STensor&& stensor) { CUTE_STATIC_ASSERT_V(rank(stensor) >= rank(Tiler_MN{}), "Rank of tensor to be partitioned too small."); // Tile the stensor and compute the (src-thr, src-val) -> (ref-thr, ref-val) layout return tile2thrfrg(zipped_divide(stensor,Tiler_MN{}), right_inverse(AtomLayoutRef{}).compose(AtomLayoutSrc{})); } // Tile a tensor or a layout from shape // (M,N,...) // to shape // ((ThrV,ThrX),FrgV,(RestM,RestN,...)) // where // ThrV: The threads local to a COPY_ATOM Dst. // ThrX: The threads tiled across COPY_ATOMs Dst. // FrgV: The values local to a COPY_ATOM Dst. // RestM: The values tiled in M. 
// RestN: The values tiled in N. template <class DTensor> CUTE_HOST_DEVICE constexpr static auto tidfrg_D(DTensor&& dtensor) { CUTE_STATIC_ASSERT_V(rank(dtensor) >= rank(Tiler_MN{}), "Rank of tensor to be partitioned too small."); // Tile the dtensor and compute the (dst-thr, dst-val) -> (ref-thr, ref-val) layout return tile2thrfrg(zipped_divide(dtensor,Tiler_MN{}), right_inverse(AtomLayoutRef{}).compose(AtomLayoutDst{})); } // Tile a tensor or a layout from shape // ((TileM,TileN,...), (RestM,RestN,...)) // to shape // ((ThrV,ThrX),FrgV,(RestM,RestN,...)) template <class Tensor, class Ref2TrgLayout> CUTE_HOST_DEVICE constexpr static auto tile2thrfrg(Tensor&& tensor, Ref2TrgLayout const& ref2trg) { // Take the thrs/vals that the atom is interested in // NOTE: Assumes the AtomNumThr are contiguous and identity within TiledThrID auto atom_layout_TV = zipped_divide(TiledLayout_TV{}, make_shape(AtomNumThr{}, AtomNumVal{})); // ((atom_tid,atom_val),(rest_tid,rest_val)) -> (m,n) // Transform to the trg layout auto trg_layout_TV = atom_layout_TV.compose(ref2trg, _); // ((trg_tid,trg_val),(rest_tid,rest_val)) -> (m,n) // Transform the thrs mode from thrid to thr_idx // NOTE: Assumes the AtomNumThr are contiguous and identity within TiledThrID auto thrval2mn = coalesce(zip(trg_layout_TV), Shape<_1,Shape<_1,_1>>{}); // ((trg_tid,rest_tid),(trg_val,rest_val)) -> (m,n) /// ================== // Transform the tile mode auto tv_tensor = tensor.compose(thrval2mn, _); // ((thrid,val),(RestM,RestN,...)) // Unfold and return return tv_tensor(make_coord(_,_), _); } // retile_S and retile_D assume they are working with the reference layout -- they are the same template <class Tensor> CUTE_HOST_DEVICE constexpr static auto retile(Tensor&& tensor) { constexpr int R = remove_cvref_t<Tensor>::rank; // Assert that AtomLayoutSrc|Dst is identity so we can skip the Ref transformation // Assume the first size<0>(tensor) elements are the first val_ids in TiledLayout_TV. 
// Then, we only need the shape+layout of those size<0>(tensor) elements in TiledLayout_TV // and that shape is what we gather from the other modes of tensor auto V = size<0>(tensor); auto frg_layout_mn = upcast<TiledNumThr{} * V>(right_inverse(TiledLayout_TV{}).with_shape(shape(Tiler_MN{}))); // (m,n) -> v_idx -- The shape and order of the V inside of TiledLayout_TV auto frg_layout_v = zipped_divide(logical_product(make_layout(V), right_inverse(frg_layout_mn)), make_layout(AtomNumVal{})); // (atom_vals,rest_vals) -> (v,m,n) /// ======= // Tile the tensor for TileFrg auto t_tensor = zipped_divide(tensor, prepend(product_each(shape(frg_layout_mn)), V)); // ((TileV,TileM,TileN,...),(1,RestM,RestN,...)) // Transform the tile mode auto v_tensor = t_tensor.compose(frg_layout_v, _); // ((atom_vals,rest_vals),(1,RM,RN,...)) // Unfold and return return v_tensor(_, append<R>(Int<0>{},_)); } CUTE_HOST_DEVICE constexpr static auto get_layoutS_TV() { // (M,N) -> (M,N) auto ref_S = make_layout(make_shape(shape(Tiler_MN{}), Int<1>{})); // (thr_idx,val_idx) -> (M,N) return tile2thrfrg(ref_S, right_inverse(AtomLayoutRef{}).compose(AtomLayoutSrc{}))(_,_,Int<0>{}); } CUTE_HOST_DEVICE constexpr static auto get_layoutS_MN() { // (thr_idx,val_idx) -> (M,N) auto layoutS_TV = get_layoutS_TV(); // (M,K) -> (thr_idx,val_idx) auto layoutS_MK = right_inverse(layoutS_TV).with_shape(shape(Tiler_MN{})); // athrid = (v,m,k) -> thr_idx auto thrID_S = make_layout(size<0>(TiledLayout_TV{})); return cute::make_tuple(layoutS_MK, thrID_S); } CUTE_HOST_DEVICE constexpr static auto get_layoutD_TV() { // (M,N) -> (M,N) auto ref_D = make_layout(make_shape(shape(Tiler_MN{}), Int<1>{})); // (thr_idx,val_idx) -> (M,N) return tile2thrfrg(ref_D, right_inverse(AtomLayoutRef{}).compose(AtomLayoutDst{}))(_,_,Int<0>{}); } CUTE_HOST_DEVICE constexpr static auto get_layoutD_MN() { // (thr_idx,val_idx) -> (M,N) auto layoutD_TV = get_layoutD_TV(); // (M,K) -> (thr_idx,val_idx) auto layoutD_MK = right_inverse(layoutD_TV).with_shape(shape(Tiler_MN{})); // athrid = (v,m,k) -> thr_idx auto thrID_D = make_layout(size<0>(TiledLayout_TV{})); return cute::make_tuple(layoutD_MK, thrID_D); } template <class ThrIdx, __CUTE_REQUIRES(is_integral<ThrIdx>::value)> CUTE_HOST_DEVICE static auto get_slice(ThrIdx const& thr_idx) { return ThrCopy<TiledCopy, ThrIdx>(thr_idx); } template <class ThrIdx, __CUTE_REQUIRES(is_integral<ThrIdx>::value)> CUTE_HOST_DEVICE static auto get_thread_slice(ThrIdx const& thr_idx) { return get_slice(thr_idx); } }; template <class TiledCopy, class ThrIdx> struct ThrCopy { ThrIdx thr_idx_; CUTE_HOST_DEVICE ThrCopy(ThrIdx const& thr_idx) : thr_idx_(thr_idx) {} template <class STensor> CUTE_HOST_DEVICE auto partition_S(STensor&& stensor) const { //static_assert(sizeof(typename remove_cvref_t<STensor>::value_type) == sizeof(typename TiledCopy::ValType), // "Expected ValType for tiling SrcTensor."); auto thr_tensor = make_tensor(static_cast<STensor&&>(stensor).data(), TiledCopy::tidfrg_S(stensor.layout())); return thr_tensor(thr_idx_, _, repeat<rank_v<STensor>>(_)); } template <class DTensor> CUTE_HOST_DEVICE auto partition_D(DTensor&& dtensor) const { //static_assert(sizeof(typename remove_cvref_t<DTensor>::value_type) == sizeof(typename TiledCopy::ValType), // "Expected ValType for tiling DstTensor."); auto thr_tensor = make_tensor(static_cast<DTensor&&>(dtensor).data(), TiledCopy::tidfrg_D(dtensor.layout())); return thr_tensor(thr_idx_, _, repeat<rank_v<DTensor>>(_)); } template <class STensor> CUTE_HOST_DEVICE static auto 
retile_S(STensor&& stensor) { // static_assert(sizeof(typename remove_cvref_t<STensor>::value_type) == sizeof(typename TiledCopy::ValType), // "Expected ValType for tiling SrcTensor."); return make_tensor(static_cast<STensor&&>(stensor).data(), TiledCopy::retile(stensor.layout())); } template <class DTensor> CUTE_HOST_DEVICE static auto retile_D(DTensor&& dtensor) { // static_assert(sizeof(typename remove_cvref_t<DTensor>::value_type) == sizeof(typename TiledCopy::ValType), // "Expected ValType for tiling DstTensor."); return make_tensor(static_cast<DTensor&&>(dtensor).data(), TiledCopy::retile(dtensor.layout())); } }; template <class... Args, class LayoutCopy_TV, class Tiler> CUTE_HOST_DEVICE auto make_tiled_copy_impl(Copy_Atom<Args...> const& atom, LayoutCopy_TV const&, Tiler const&) { return TiledCopy<Copy_Atom<Args...>, LayoutCopy_TV, Tiler>{atom}; } // // These tile the Copy_Atom as a whole // template <class... CArgs, class... MArgs> CUTE_HOST_DEVICE auto make_tiled_copy_A(Copy_Atom<CArgs...> const& copy_atom, TiledMMA<MArgs...> const& mma) { return make_tiled_copy_impl(copy_atom, mma.get_layoutA_TV(), make_shape(tile_size<0>(mma),tile_size<2>(mma))); } template <class... CArgs, class... MArgs> CUTE_HOST_DEVICE auto make_tiled_copy_B(Copy_Atom<CArgs...> const& copy_atom, TiledMMA<MArgs...> const& mma) { return make_tiled_copy_impl(copy_atom, mma.get_layoutB_TV(), make_shape(tile_size<1>(mma),tile_size<2>(mma))); } template <class... CArgs, class... MArgs> CUTE_HOST_DEVICE auto make_tiled_copy_C(Copy_Atom<CArgs...> const& copy_atom, TiledMMA<MArgs...> const& mma) { return make_tiled_copy_impl(copy_atom, mma.get_layoutC_TV(), make_shape(tile_size<0>(mma),tile_size<1>(mma))); } // returns the smallest tiled copy that can retile LayoutC_TV // for use with pipelined epilogues with subtiled stores template <class... CArgs, class... MArgs> CUTE_HOST_DEVICE auto make_tiled_copy_C_atom(Copy_Atom<CArgs...> const& copy_atom, TiledMMA<MArgs...> const& mma) { // Truncate the V-layout to just the Copy_Atom, keep the V-order auto layoutC_TV = mma.get_layoutC_TV(); auto copy_V = Int<Copy_Atom<CArgs...>::NumValSrc>{}; CUTE_STATIC_ASSERT_V(copy_V <= size<1>(layoutC_TV)); auto layout_TV = composition(layoutC_TV, make_layout(make_shape(size<0>(layoutC_TV), copy_V))); // Recompute tiler and restride the TV layout for the new tiler // Tiler -- Find the active elements in the MMA tensor and generate a tiler to extract them // Convert to the awkward by-mode tiler to preserve the modes of the tiled MMA auto mma_tiler = make_shape(tile_size<0>(mma),tile_size<1>(mma)); auto mma_zeros = repeat_like(mma_tiler, Int<0>{}); auto tiler = transform(make_seq<rank(mma_tiler)>{}, [&](auto i) { return filter(composition(make_layout(mma_tiler, replace<i>(mma_zeros, Int<1>{})), layout_TV)); }); // Layout_TV -- Find the (tid,vid) -> tile coord transformation // Apply the tiler to a reference and transform the codomain // tile_coord -> mma_coord auto tile2mma = composition(make_layout(mma_tiler), tiler); // (tid,vid) -> tile_coord auto layout_tv = composition(left_inverse(tile2mma), layout_TV); return make_tiled_copy_impl(copy_atom, layout_tv, tiler); } /** Produce a TiledCopy from logical thread and values layouts. * The thread and value layouts map coordinates to thr_idx and val_idx. * The product of these layouts is taken to produce the TV layout and the Tiler. * Useful when threads and values need very specific mappings onto coordinates * in the target tensors. */ template <class... 
Args, class ThrLayout, class ValLayout = Layout<_1>> CUTE_HOST_DEVICE auto make_tiled_copy(Copy_Atom<Args...> const& copy_atom, ThrLayout const& thr_layout = {}, // (m,n) -> thr_idx ValLayout const& val_layout = {}) // (m,n) -> val_idx { // Take the raked_products to compute the Layout_MN // (M,N) -> (thr_idx, val_idx) auto layout_mn = raked_product(thr_layout, val_layout); // (thr_idx, val_idx) -> (M,N) auto layout_tv = right_inverse(layout_mn).with_shape(make_shape(size(thr_layout), size(val_layout))); // Tiler for extracting relevant elements // (M,N) -> tensor coord auto tiler = product_each(shape(layout_mn)); #if 0 print("thr_layout: "); print(thr_layout); print("\n"); print("val_layout: "); print(val_layout); print("\n"); print("layout_mn : "); print(layout_mn); print("\n"); print("layout_tv : "); print(layout_tv); print("\n"); print("tiler : "); print(tiler); print("\n"); #endif return make_tiled_copy_impl(copy_atom, layout_tv, tiler); } /** Produce a TiledCopy from thread and value offset maps. * The TV Layout maps threads and values to the codomain of the data_layout. * It is verified that the intended codomain is valid within data_layout. * Useful when threads and values don't care about owning specific coordinates, but * care more about the vector-width and offsets between them. */ template <class... Args, class AtomTVLayout, class DataLayout> CUTE_HOST_DEVICE constexpr auto make_cotiled_copy(Copy_Atom<Args...> const& copy_atom, AtomTVLayout const& atom_tv_layout, // atom (thr,val) -> data addr DataLayout const& data_layout) // coord -> data addr The target layout { static_assert(is_static<AtomTVLayout>::value); static_assert(is_static<DataLayout>::value); // data addr -> data coord Append 1:0 so off-the-ends get the stride-0 auto inv_data_layout = make_layout(left_inverse(data_layout), Layout<_1,_0>{}); // (tid,vid) -> data_coord auto layout_tv_data = composition(inv_data_layout, atom_tv_layout); // Check validity CUTE_STATIC_ASSERT_V(coalesce(composition(data_layout, layout<1>(layout_tv_data))) == coalesce(layout<1>(atom_tv_layout)), "The memory pointed to by AtomTVLayout does not exist in the DataLayout."); #if 0 if (thread0()) { print("data_layout : "); print(data_layout); print("\n"); print("atom_tv_layout : "); print(atom_tv_layout); print("\n"); print("layout_tv_data : "); print(layout_tv_data); print("\n"); } #endif // // Tiler -- Find the active elements in the DATA tensor and generate a tiler to extract them // // Convert to the awkward by-mode tiler to preserve the modes of the tiled DATA auto flat_data_shape = product_each(shape(data_layout)); auto flat_data_zeros = repeat<rank(flat_data_shape)>(Int<0>{}); auto tiler = transform(make_seq<rank(flat_data_shape)>{}, [&](auto i) { return filter(composition(make_layout(flat_data_shape, replace<i>(flat_data_zeros, Int<1>{})), layout_tv_data)); }); // // Layout_TV -- Find the (tid,vid) -> tile coord transformation // // Apply the tiler to a reference and transform the codomain // tile_coord -> data_coord auto tile2data = composition(make_layout(flat_data_shape), tiler); // (tid,vid) -> tile_coord auto layout_tv = composition(left_inverse(tile2data), layout_tv_data); #if 0 if (thread0()) { print("tiler : "); print(tiler); print("\n"); print("tile2data : "); print(tile2data); print("\n"); print("layout_tv : "); print(layout_tv); print("\n"); } #endif return make_tiled_copy_impl(copy_atom, layout_tv, tiler); } // Make a TiledCopy out of the copy_atom that matches the Src-Layout of tiled_copy template <class... 
Args, class TiledCopy> CUTE_HOST_DEVICE auto make_tiled_copy_S(Copy_Atom<Args...> const& copy_atom, TiledCopy const& tiled_copy) { return make_tiled_copy_impl(copy_atom, tiled_copy.get_layoutS_TV(), typename TiledCopy::Tiler_MN{}); } // Make a TiledCopy out of the copy_atom that matches the Dst-Layout of tiled_copy template <class... Args, class TiledCopy> CUTE_HOST_DEVICE auto make_tiled_copy_D(Copy_Atom<Args...> const& copy_atom, TiledCopy const& tiled_copy) { return make_tiled_copy_impl(copy_atom, tiled_copy.get_layoutD_TV(), typename TiledCopy::Tiler_MN{}); } // // Size // // The logical size of a TileCopy template <int... I, class... Args> CUTE_HOST_DEVICE constexpr auto tile_size(TiledCopy<Args...> const&) { return size<I...>(typename TiledCopy<Args...>::Tiler_MN{}); } // The number of threads involved in a TiledCopy template <class... Args> CUTE_HOST_DEVICE constexpr auto size(TiledCopy<Args...> const&) { return typename TiledCopy<Args...>::TiledNumThr{}; } // // Display utilities // template <class... Args, class T> CUTE_HOST_DEVICE void print(Copy_Atom<Copy_Traits<Args...>, T> const&) { using Atom = Copy_Atom<Copy_Traits<Args...>, T>; print("Copy_Atom\n"); print(" ThrID: "); print(typename Atom::ThrID{}); print("\n"); print(" ValLayoutSrc: "); print(typename Atom::ValLayoutSrc{}); print("\n"); print(" ValLayoutDst: "); print(typename Atom::ValLayoutDst{}); print("\n"); print(" ValLayoutRef: "); print(typename Atom::ValLayoutRef{}); print("\n"); print(" ValueType: "); print(sizeof_bits<typename Atom::ValType>::value); print("b\n"); } template <class Atom, class... Args> CUTE_HOST_DEVICE void print(TiledCopy<Atom, Args...> const& copy, char const* pad = "") { using Copy = TiledCopy<Atom, Args...>; print("TiledCopy\n"); print(" Tiler_MN: "); print(typename Copy::Tiler_MN{}); print("\n"); print(" TiledLayout_TV: "); print(typename Copy::TiledLayout_TV{}); print("\n"); print(static_cast<Atom const&>(copy)); } template <class TiledCopy, class ThrIdx> CUTE_HOST_DEVICE void print(ThrCopy<TiledCopy, ThrIdx> const& thr_copy) { print("ThrCopy\n"); print(" ThrIdx: "); print(thr_copy.thr_idx_); print("\n"); print(TiledCopy{}); } template <class... 
Args> CUTE_HOST_DEVICE auto print_latex(TiledCopy<Args...> const& copy) { auto [layoutS_MN, thrID_S] = copy.get_layoutS_MN(); auto [layoutD_MN, thrID_D] = copy.get_layoutD_MN(); print_latex_copy(layoutS_MN, thrID_S, layoutD_MN, thrID_D); } // MNK Copy Layout to Latex TIKZ -- 8-value color coded by thread template <class LayoutS, class ThrIDS, class LayoutD, class ThrIDD> CUTE_HOST_DEVICE void print_latex_copy(LayoutS const& S, ThrIDS const& TS, // (m,n) -> (tid,vid) and tid -> thr_idx LayoutD const& D, ThrIDD const& TD) // (m,n) -> (tid,vid) and tid -> thr_idx { CUTE_STATIC_ASSERT_V(rank(S) == Int<2>{}); CUTE_STATIC_ASSERT_V(rank(D) == Int<2>{}); assert(size<0>(S) == size<0>(D)); assert(size<1>(S) == size<1>(D)); char const* latex_header = "\\documentclass{standalone}\n" "\\usepackage{tikz}\n" "\\usetikzlibrary{external}\n" "\\tikzexternalize\n" "\\begin{document}\n" "\\begin{tikzpicture}[x={(0cm,-1cm)},y={(1cm,0cm)},box/.style={rectangle,draw=black,thick,minimum size=1cm,anchor=center}]\n\n"; char const* latex_footer = "\\end{tikzpicture}\n" "\\end{document}\n"; char const* color_map[8] = {"{rgb,255:red,175;green,175;blue,255}", "{rgb,255:red,175;green,255;blue,175}", "{rgb,255:red,255;green,255;blue,175}", "{rgb,255:red,255;green,175;blue,175}", "{rgb,255:red,210;green,210;blue,255}", "{rgb,255:red,210;green,255;blue,210}", "{rgb,255:red,255;green,255;blue,210}", "{rgb,255:red,255;green,210;blue,210}",}; // Header printf("%% LayoutS: "); print(S); printf("\n"); printf("%% ThrIDS : "); print(TS); printf("\n"); printf("%% LayoutD: "); print(D); printf("\n"); printf("%% ThrIDD : "); print(TD); printf("\n\n"); printf(latex_header); // S starting at 0,0 for (int i = 0; i < size<0>(S); ++i) { for (int j = 0; j < size<1>(S); ++j) { int thrid = S(i,j) % size(TS); int val_idx = S(i,j) / size(TS); int thr_idx = TS(thrid); printf("\\node[box,fill=%s] at (%d,%d) {\\shortstack{T%d \\\\ V%d}};\n", color_map[thr_idx % 8], i, j, thr_idx, val_idx); } } // D starting at 0,size<1>(S)+3 for (int i = 0; i < size<0>(D); ++i) { for (int j = 0; j < size<1>(D); ++j) { int thrid = D(i,j) % size(TD); int val_idx = D(i,j) / size(TD); int thr_idx = TD(thrid); printf("\\node[box,fill=%s] at (%d,%d) {\\shortstack{T%d \\\\ V%d}};\n", color_map[thr_idx % 8], i, j + size<1>(S) + 3, thr_idx, val_idx); } } // S Labels for (int i = 0, j = -1; i < size<0>(S); ++i) { printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, i); } for (int j = 0, i = -1; j < size<1>(S); ++j) { printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, j); } // D Labels for (int i = 0, j = size<1>(D); i < size<0>(S); ++i) { printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j + size<1>(S) + 3, i); } for (int j = 0, i = -1; j < size<1>(D); ++j) { printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j + size<1>(S) + 3, j); } // Footer printf(latex_footer); } } // end namespace cute //////////////////////////////////////////////////////////////////////////////////////////////////// #include <cute/atom/copy_traits_sm50.hpp> #include <cute/atom/copy_traits_sm75.hpp> #include <cute/atom/copy_traits_sm80.hpp> #include <cute/atom/copy_traits_sm90.hpp> // Config #if (__CUDACC_VER_MAJOR__ >= 12) # define CUTE_COPY_ATOM_TMA_SM90_ENABLED #endif #if defined(CUTE_COPY_ATOM_TMA_SM90_ENABLED) #include <cute/atom/copy_traits_sm90_tma.hpp> #endif ////////////////////////////////////////////////////////////////////////////////////////////////////
include/cute/atom/copy_atom.hpp/0
{ "file_path": "include/cute/atom/copy_atom.hpp", "repo_id": "include", "token_count": 11357 }
11
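A typical way to use the builders in copy_atom.hpp is to wrap a copy operation in a Copy_Atom and hand it to make_tiled_copy together with a thread layout and a value layout, then slice the result per thread inside the kernel. The sketch below shows one such instantiation; the 32x8 thread arrangement, the 128-bit UniversalCopy vector width, and the float value type are illustrative choices rather than requirements of the header.

// Sketch: a TiledCopy built from a 128-bit UniversalCopy atom over float,
// tiled across 32x8 threads with 4 contiguous float values per thread.
#include <cute/tensor.hpp>
#include <cute/atom/copy_atom.hpp>

using namespace cute;

using CopyAtom = Copy_Atom<UniversalCopy<uint128_t>, float>;  // 4 floats per access

inline auto make_example_tiled_copy() {
  return make_tiled_copy(CopyAtom{},
                         Layout<Shape<_32, _8>, Stride<_8, _1>>{},  // (m,n) -> thr_idx
                         Layout<Shape< _1, _4>>{});                 // (m,n) -> val_idx
}

// Inside a kernel, the TiledCopy is sliced per thread and used to partition
// the source and destination tiles before calling cute::copy (illustrative):
//
//   auto tiled_copy = make_example_tiled_copy();
//   auto thr_copy   = tiled_copy.get_thread_slice(threadIdx.x);
//   Tensor tSrc = thr_copy.partition_S(gmem_tile);   // source fragment view
//   Tensor tDst = thr_copy.partition_D(smem_tile);   // destination fragment view
//   copy(tiled_copy, tSrc, tDst);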
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/arch/mma_sm90.hpp> #include <cute/atom/mma_traits.hpp> #include <cute/tensor.hpp> namespace cute { // Fence between the async destination accumulators of GMMA & source for their dependent use template <class Engine, class Layout> CUTE_HOST_DEVICE void warpgroup_fence_operand(Tensor<Engine, Layout>& frg) { CUTE_STATIC_ASSERT(is_static<Layout>::value); if constexpr (is_same_v<typename Engine::value_type, float>) { auto f32_frg = recast<float>(frg); CUTE_UNROLL for (int i = 0; i < size(f32_frg); ++i) { warpgroup_fence_operand(f32_frg(i)); } } else { CUTE_STATIC_ASSERT(is_rmem<Engine>::value); auto u32_frg = recast<uint32_t>(frg); CUTE_UNROLL for (int i = 0; i < size(u32_frg); ++i) { warpgroup_fence_operand(u32_frg(i)); } } } namespace GMMA { /////////////////////////////////////////// // Common layouts for GMMA Shared Memory // /////////////////////////////////////////// // M|N-major GMMA layouts in units of bits using Layout_MN_INTER_Atom_Bits = ComposedLayout<Swizzle<0,4,3>, smem_ptr_flag, Layout<Shape< _128,_8>,Stride<_1, _128>>>; using Layout_MN_SW32_Atom_Bits = ComposedLayout<Swizzle<1,4,3>, smem_ptr_flag, Layout<Shape< _256,_8>,Stride<_1, _256>>>; using Layout_MN_SW64_Atom_Bits = ComposedLayout<Swizzle<2,4,3>, smem_ptr_flag, Layout<Shape< _512,_8>,Stride<_1, _512>>>; using Layout_MN_SW128_Atom_Bits = ComposedLayout<Swizzle<3,4,3>, smem_ptr_flag, Layout<Shape<_1024,_8>,Stride<_1,_1024>>>; // K-major GMMA layouts in units of bits using Layout_K_INTER_Atom_Bits = ComposedLayout<Swizzle<0,4,3>, smem_ptr_flag, Layout<Shape<_8, _128>,Stride< _128,_1>>>; using Layout_K_SW32_Atom_Bits = ComposedLayout<Swizzle<1,4,3>, smem_ptr_flag, Layout<Shape<_8, _256>,Stride< _256,_1>>>; using Layout_K_SW64_Atom_Bits 
namespace GMMA {

///////////////////////////////////////////
// Common layouts for GMMA Shared Memory //
///////////////////////////////////////////

// M|N-major GMMA layouts in units of bits
using Layout_MN_INTER_Atom_Bits = ComposedLayout<Swizzle<0,4,3>, smem_ptr_flag, Layout<Shape< _128,_8>,Stride<_1, _128>>>;
using Layout_MN_SW32_Atom_Bits  = ComposedLayout<Swizzle<1,4,3>, smem_ptr_flag, Layout<Shape< _256,_8>,Stride<_1, _256>>>;
using Layout_MN_SW64_Atom_Bits  = ComposedLayout<Swizzle<2,4,3>, smem_ptr_flag, Layout<Shape< _512,_8>,Stride<_1, _512>>>;
using Layout_MN_SW128_Atom_Bits = ComposedLayout<Swizzle<3,4,3>, smem_ptr_flag, Layout<Shape<_1024,_8>,Stride<_1,_1024>>>;

// K-major GMMA layouts in units of bits
using Layout_K_INTER_Atom_Bits  = ComposedLayout<Swizzle<0,4,3>, smem_ptr_flag, Layout<Shape<_8, _128>,Stride< _128,_1>>>;
using Layout_K_SW32_Atom_Bits   = ComposedLayout<Swizzle<1,4,3>, smem_ptr_flag, Layout<Shape<_8, _256>,Stride< _256,_1>>>;
using Layout_K_SW64_Atom_Bits   = ComposedLayout<Swizzle<2,4,3>, smem_ptr_flag, Layout<Shape<_8, _512>,Stride< _512,_1>>>;
using Layout_K_SW128_Atom_Bits  = ComposedLayout<Swizzle<3,4,3>, smem_ptr_flag, Layout<Shape<_8,_1024>,Stride<_1024,_1>>>;

// M|N-major layouts in units of Type
template <class Type>
using Layout_MN_INTER_Atom = decltype(upcast<sizeof_bits<Type>::value>(Layout_MN_INTER_Atom_Bits{}));
template <class Type>
using Layout_MN_SW32_Atom  = decltype(upcast<sizeof_bits<Type>::value>(Layout_MN_SW32_Atom_Bits{}));
template <class Type>
using Layout_MN_SW64_Atom  = decltype(upcast<sizeof_bits<Type>::value>(Layout_MN_SW64_Atom_Bits{}));
template <class Type>
using Layout_MN_SW128_Atom = decltype(upcast<sizeof_bits<Type>::value>(Layout_MN_SW128_Atom_Bits{}));

// K-major layouts in units of Type
template <class Type>
using Layout_K_INTER_Atom  = decltype(upcast<sizeof_bits<Type>::value>(Layout_K_INTER_Atom_Bits{}));
template <class Type>
using Layout_K_SW32_Atom   = decltype(upcast<sizeof_bits<Type>::value>(Layout_K_SW32_Atom_Bits{}));
template <class Type>
using Layout_K_SW64_Atom   = decltype(upcast<sizeof_bits<Type>::value>(Layout_K_SW64_Atom_Bits{}));
template <class Type>
using Layout_K_SW128_Atom  = decltype(upcast<sizeof_bits<Type>::value>(Layout_K_SW128_Atom_Bits{}));

// With GMMA::Major param
template <class Type, GMMA::Major tnsp>
using Layout_INTER_Atom = typename conditional<tnsp == GMMA::Major::MN,
                                               Layout_MN_INTER_Atom<Type>,
                                               Layout_K_INTER_Atom<Type>>::type;
template <class Type, GMMA::Major tnsp>
using Layout_SW32_Atom  = typename conditional<tnsp == GMMA::Major::MN,
                                               Layout_MN_SW32_Atom<Type>,
                                               Layout_K_SW32_Atom<Type>>::type;
template <class Type, GMMA::Major tnsp>
using Layout_SW64_Atom  = typename conditional<tnsp == GMMA::Major::MN,
                                               Layout_MN_SW64_Atom<Type>,
                                               Layout_K_SW64_Atom<Type>>::type;
template <class Type, GMMA::Major tnsp>
using Layout_SW128_Atom = typename conditional<tnsp == GMMA::Major::MN,
                                               Layout_MN_SW128_Atom<Type>,
                                               Layout_K_SW128_Atom<Type>>::type;
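// For example (sketch): a K-major, 128B-swizzled shared-memory layout for a 64x64 half_t tile
// can be built by tiling the atom to the desired shape. The 64x64 tile shape here is only
// illustrative; any shape evenly divisible by the atom works.
//
//   using AtomLayout = GMMA::Layout_K_SW128_Atom<half_t>;            // (8,64) half_t atom
//   auto  sA_layout  = tile_to_shape(AtomLayout{}, Shape<_64,_64>{});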
//
// Tensor (position-dependent swizzle) to LayoutType utility
//

template <class Engine, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
LayoutType
layout_type(Tensor<Engine, Layout<Shape,Stride>> const&)
{
  static_assert(is_same<uint128_t, typename Engine::value_type>::value,
                "Expected uint128_t type in LayoutType conversion.");

  using Swizzle = get_swizzle_t<Engine>;
  constexpr int B = Swizzle::num_bits;
  constexpr int M = Swizzle::num_base;
  constexpr int S = Swizzle::num_shft;

  static_assert(M == 4,           "Unsupported layout swizzle");
  static_assert(0 <= B && B <= 3, "Unsupported layout swizzle");
  static_assert(S == 3,           "Unsupported layout swizzle");

  switch (B) {
    case 0: return LayoutType::INTERLEAVE;
    case 1: return LayoutType::B32;
    case 2: return LayoutType::B64;
    case 3: return LayoutType::B128;
  }
  return LayoutType::INTERLEAVE;  // ERROR
}

///////////////////////////////////////////////////////////////////////////////
// Construction method for GMMA Descriptors
///////////////////////////////////////////////////////////////////////////////

/**
 * ///////////////////////////////
 * // make_gmma_desc<Major::MN> //
 * ///////////////////////////////
 * Each GmmaDescriptor Major-MN describes a canonical layout of the form
 *
 * LayoutType::INTERLEAVE : Swizzle<0,4,3> o smem_ptr o ((T,1,m),(8,k)):((1,T,SBO),(1T,LBO))
 * LayoutType::B32        : Swizzle<1,4,3> o smem_ptr o ((T,2,m),(8,k)):((1,T,LBO),(2T,SBO))
 * LayoutType::B64        : Swizzle<2,4,3> o smem_ptr o ((T,4,m),(8,k)):((1,T,LBO),(4T,SBO))
 * LayoutType::B128       : Swizzle<3,4,3> o smem_ptr o ((T,8,m),(8,k)):((1,T,LBO),(8T,SBO))
 *
 * where
 *   T  : sizeof(uint128_t) / sizeof(value_type)
 *   m  : integer in [1,16] corresponding to GMMA shape
 *   k  : integer in [1,32] corresponding to GMMA shape
 *   SBO: stride byte offset
 *   LBO: leading byte offset
 *
 * See GMMA::Layout_MN_XXX_Atom<value_type> for building canonical GmmaDescriptor Major-MN layouts.
 * For example,
 *   auto smem_layout = tile_to_shape(Layout_MN_SW128_Atom<value_type>{}, Shape<_128,_64>{});
 * is guaranteed to be accepted by make_gmma_desc<Major::MN> for appropriate value_type.
 *
 * //////////////////////////////
 * // make_gmma_desc<Major::K> //
 * //////////////////////////////
 * Each GmmaDescriptor Major-K describes a canonical layout of the form
 *
 * LayoutType::INTERLEAVE : Swizzle<0,4,3> o smem_ptr o ((8,m),(T,2)):((1T,SBO),(1,LBO))
 * LayoutType::B32        : Swizzle<1,4,3> o smem_ptr o ((8,m),(T,2)):((2T,SBO),(1, T ))
 * LayoutType::B64        : Swizzle<2,4,3> o smem_ptr o ((8,m),(T,2)):((4T,SBO),(1, T ))
 * LayoutType::B128       : Swizzle<3,4,3> o smem_ptr o ((8,m),(T,2)):((8T,SBO),(1, T ))
 *
 * See GMMA::Layout_K_XXX_Atom<value_type> for building canonical GmmaDescriptor Major-K layouts.
 * For example,
 *   auto smem_layout = tile_to_shape(Layout_K_SW128_Atom<value_type>{}, Shape<_128,_64>{});
 * is guaranteed to be accepted by make_gmma_desc<Major::K> for appropriate value_type.
 */
template <GMMA::Major MajorMode, class TEngine, class TLayout>
CUTE_HOST_DEVICE constexpr
GmmaDescriptor
make_gmma_desc(Tensor<TEngine,TLayout> const& tensor)
{
  static_assert(is_smem<TEngine>::value, "GMMA Descriptors can only be constructed on smem.");
  static_assert(TLayout::rank == 2, "GMMA Descriptors can only be constructed on rank-2 tensors.");
  using value_type = typename TEngine::value_type;

  Tensor u128_tensor = recast<uint128_t const>(tensor);

  // Result
  GmmaDescriptor desc;

  // Layout type
  constexpr GMMA::LayoutType LAYOUT_TYPE = GMMA::layout_type(u128_tensor);
  desc.bitfield.layout_type_ = uint8_t(LAYOUT_TYPE);

  // Start address (4LSB not included)
  uint32_t start_address = cast_smem_ptr_to_uint(raw_pointer_cast(u128_tensor.data()));
  desc.bitfield.start_address_ = static_cast<uint16_t>(start_address >> 4);

  constexpr uint8_t base_offset = 0;
  desc.bitfield.base_offset_ = base_offset;

  // LayoutType meta
  constexpr int W = LAYOUT_TYPE == GMMA::LayoutType::INTERLEAVE ? 1 :
                    LAYOUT_TYPE == GMMA::LayoutType::B32        ? 2 :
                    LAYOUT_TYPE == GMMA::LayoutType::B64        ? 4 :
                    LAYOUT_TYPE == GMMA::LayoutType::B128       ? 8 : -1;

  if constexpr (MajorMode == GMMA::Major::MN)
  {
    /* In units of uint128_t, each GmmaDescriptor Major-MN describes a canonical layout of the form
     *
     * LayoutType::INTERLEAVE : Swizzle<0,4,3> o smem_ptr o ((1,n),(8,k)):((X,SBO),(1,LBO))
     * LayoutType::B32        : Swizzle<1,4,3> o smem_ptr o ((2,n),(8,k)):((1,LBO),(2,SBO))
     * LayoutType::B64        : Swizzle<2,4,3> o smem_ptr o ((4,n),(8,k)):((1,LBO),(4,SBO))
     * LayoutType::B128       : Swizzle<3,4,3> o smem_ptr o ((8,n),(8,k)):((1,LBO),(8,SBO))
     */
    static_assert(size<1>(u128_tensor) == Int<(256 / cute::sizeof_bits<value_type>::value)>{},  // K size
                  "Not a canonical GMMA_MN Layout: Expected K-size 256/sizeof_bits<T>.");

    // Construct the canonical GMMA T Layout with shape ((W,n),(8,2))
    Layout canonical_layout = logical_divide(layout(u128_tensor), make_tile(Layout<Int<W>,_1>{}, Layout<Int<8>,_1>{}));

    // Check ranks of canonical
    CUTE_STATIC_ASSERT_V(rank<0>(canonical_layout) == Int<2>{}, "Not a canonical GMMA_MN Layout: No flat offset mode");
    CUTE_STATIC_ASSERT_V(rank<1>(canonical_layout) == Int<2>{}, "Not a canonical GMMA_MN Layout: No flat offset mode");

    // Check canonical mode strides
    constexpr uint32_t stride_00 = stride<0,0>(canonical_layout);
    constexpr uint32_t expected_stride_00 = LAYOUT_TYPE == GMMA::LayoutType::INTERLEAVE ? stride<0,0>(canonical_layout) : 1;
    static_assert(stride_00 == expected_stride_00, "Not a canonical GMMA_MN Layout: Expected stride failure.");
    constexpr uint32_t stride_10 = stride<1,0>(canonical_layout);
    constexpr uint32_t expected_stride_10 = W;
    static_assert(stride_10 == expected_stride_10, "Not a canonical GMMA_MN Layout: Expected stride failure.");

    // stride dimension byte offset and leading dimension byte offset (4LSB not included == uint128_t units)
    constexpr uint32_t stride_01 = stride<0,1>(canonical_layout);
    constexpr uint32_t stride_11 = stride<1,1>(canonical_layout);

    desc.bitfield.stride_byte_offset_  = (LAYOUT_TYPE == GMMA::LayoutType::INTERLEAVE) ? stride_01 : stride_11;
    desc.bitfield.leading_byte_offset_ = (LAYOUT_TYPE == GMMA::LayoutType::INTERLEAVE) ? stride_11 : stride_01;
  }
  else if constexpr (MajorMode == GMMA::Major::K)
  {
    /* In units of uint128_t, each GmmaDescriptor Major-K describes a canonical layout of the form
     *
     * LayoutType::INTERLEAVE : Swizzle<0,4,3> o smem_ptr o ((8,n),2):((1,SBO),LBO)
     * LayoutType::B32        : Swizzle<1,4,3> o smem_ptr o ((8,n),2):((2,SBO),1)
     * LayoutType::B64        : Swizzle<2,4,3> o smem_ptr o ((8,n),2):((4,SBO),1)
     * LayoutType::B128       : Swizzle<3,4,3> o smem_ptr o ((8,n),2):((8,SBO),1)
     */
    CUTE_STATIC_ASSERT_V(size<0>(u128_tensor) % Int<8>{} == Int<0>{},  // N|M size
                         "Not a canonical GMMA_K Layout: Expected MN-size multiple of 8.");
    CUTE_STATIC_ASSERT_V(size<1>(u128_tensor) == Int<2>{},             // K   size
                         "Not a canonical GMMA_K Layout: Expected K-size 2 (in units of uint128_t).");

    // Construct the canonical GMMA N Layout with shape ((8,n),(2,1))
    Layout canonical_layout = logical_divide(layout(u128_tensor), make_tile(Layout<_8,_1>{}, Layout<_2,_1>{}));

    // Check ranks of canonical
    CUTE_STATIC_ASSERT_V(rank<0>(canonical_layout) == Int<2>{}, "Not a canonical GMMA_K Layout: No flat offset mode");
    CUTE_STATIC_ASSERT_V(rank<1>(canonical_layout) == Int<2>{}, "Not a canonical GMMA_K Layout: No flat offset mode");

    // Check canonical mode strides
    constexpr uint32_t stride_00 = stride<0,0>(canonical_layout);
    constexpr uint32_t expected_stride_00 = W;
    static_assert(stride_00 == expected_stride_00, "Not a canonical GMMA_K Layout: Expected stride failure.");
    constexpr uint32_t stride_10 = stride<1,0>(canonical_layout);
    constexpr uint32_t expected_stride_10 = (LAYOUT_TYPE == GMMA::LayoutType::INTERLEAVE) ? stride<1,0>(canonical_layout) : 1;
    static_assert(stride_10 == expected_stride_10, "Not a canonical GMMA_K Layout: Expected stride failure.");

    // stride dimension byte offset and leading dimension byte offset (4LSB not included == uint128_t units)
    constexpr uint32_t stride_01 = stride<0,1>(canonical_layout);

    desc.bitfield.stride_byte_offset_  = stride_01;
    desc.bitfield.leading_byte_offset_ = stride_10;
  }
  else {
    static_assert(MajorMode != GMMA::Major::MN && MajorMode != GMMA::Major::K, "Unrecognized MajorMode!");
  }

#if 0
  // DEBUG and SANITY
  assert((start_address & 0b0000001111) == 0);  // Must be 16B aligned (4LSB are 0) no negotiation
  assert((start_address & 0b1110000000) == 0);  // Assert base_offset is 0, generalize later
  if (thread0()) {
    print("smem_desc input tensor: "); print(tensor.data()); print(" o "); print(tensor.layout()); print("\n");
    print("smem_desc uint128_t tensor: "); print(u128_tensor.data()); print(" o "); print(u128_tensor.layout()); print("\n");
    //print("     desc canonical layout: "); print(canonical_layout); print("\n");
    print(desc);
  }
#endif

  return desc;
}
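// For example (sketch): constructing a descriptor directly from a canonical K-major smem tensor.
// smem_buf is assumed to be a __shared__ half_t array large enough for the 64x64 tile; in
// practice the descriptor is usually built through the smem_desc fragment machinery below rather
// than by calling make_gmma_desc by hand.
//
//   auto   sA_layout = tile_to_shape(GMMA::Layout_K_SW128_Atom<half_t>{}, Shape<_64,_64>{});
//   Tensor sA        = make_tensor(make_smem_ptr(smem_buf), sA_layout);
//   GmmaDescriptor desc = GMMA::make_gmma_desc<GMMA::Major::K>(sA);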
///////////////////////////////////////////////////////////////////////////////
// Higher level GMMA Descriptor utilities
///////////////////////////////////////////////////////////////////////////////

struct DescriptorIterator
{
  using reference    = GmmaDescriptor;
  using element_type = GmmaDescriptor;
  using value_type   = GmmaDescriptor;

  GmmaDescriptor desc_;

  // Dereference returns the GmmaDescriptor
  CUTE_HOST_DEVICE constexpr
  reference operator*() const { return desc_; }

  // Advance and return a new GmmaDescriptor
  template <class Index>
  CUTE_HOST_DEVICE constexpr
  reference operator[](Index const& i) const { return *(*this + i); }

  // Return an advanced iterator
  template <class Index>
  CUTE_HOST_DEVICE constexpr
  DescriptorIterator operator+(Index const& offset) const
  {
    return { GmmaDescriptor{desc_ + uint64_t(offset)} };
  }
};

template <class T>
CUTE_HOST_DEVICE constexpr
GmmaDescriptor
raw_pointer_cast(DescriptorIterator const& ptr) {
  return ptr.desc_;
}

// Recast a DescriptorIterator Tensor to uint64_t, its RegType in mma_unpack
template <class NewT>
CUTE_HOST_DEVICE constexpr
DescriptorIterator
recast_ptr(DescriptorIterator const& iter) {
  static_assert(is_same<NewT, uint64_t>::value, "Can only cast GmmaDescriptorIterator to uint64_t.");
  return iter;  // Do nothing, it will still dereference to GmmaDescriptor and decay to uint64_t
}

CUTE_HOST_DEVICE void
print(DescriptorIterator) { printf("GMMA::DescriptorIterator"); }

// The GMMA Traits below have custom fragment type flags for their smem desc tensors.
// These flags specialize a MakeTensor customization point to correctly make the fragment that is desired.
template <GMMA::Major>
struct smem_desc : DescriptorIterator {};

} // end namespace GMMA

// Customization point for creating a GMMA::smem_desc Tensor
template <GMMA::Major MajorMode>
struct MakeTensor<GMMA::smem_desc<MajorMode>>
{
  template <class TEngine, class TLayout>
  CUTE_HOST_DEVICE constexpr auto
  operator()(Tensor<TEngine,TLayout> const& smem_tensor)
  {
    static_assert(is_smem<TEngine>::value, "Expected SMEM Tensor to construct a GMMA Desc Tensor");
    return make_tensor(GMMA::DescriptorIterator{GMMA::make_gmma_desc<MajorMode>(tensor<0>(smem_tensor))},
                       replace<0>(recast<uint128_t const>(smem_tensor).layout(), Layout<_1,_0>{}));
  }
};
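// For example (sketch): with a TiledMMA built from one of the *_SS atoms below, FrgTypeA/FrgTypeB
// are GMMA::smem_desc<Major>, so the A/B "fragments" are GmmaDescriptor-backed tensors produced by
// the MakeTensor customization above rather than register copies. Names (thr_mma, sA) are assumed
// to come from the surrounding kernel.
//
//   Tensor tCsA = thr_mma.partition_A(sA);         // partitioned smem view of A
//   Tensor tCrA = thr_mma.make_fragment_A(tCsA);   // descriptor tensor, one GmmaDescriptor per MMA tile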
///////////////////////////////////////////////////////////////////////////////
//////////////////////////// MMA_TRAITS ///////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////

namespace GMMA {

// Accumulator layouts
using CLayout_64x8   = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8>>>;
using CLayout_64x16  = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2,  _2>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x32  = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2,  _4>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x48  = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2,  _6>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x64  = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2,  _8>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x80  = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2, _10>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x96  = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2, _12>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x112 = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2, Int<14>>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x128 = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2, _16>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x144 = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2, Int<18>>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x160 = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2, Int<20>>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x176 = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2, Int<22>>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x192 = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2, _24>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x208 = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2, Int<26>>>,
                              Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>;
using CLayout_64x224 = Layout<Shape <Shape <  _4,_8, _4>,Shape < _2,_2, Int<28>>>,
Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>; using CLayout_64x240 = Layout<Shape <Shape < _4,_8, _4>,Shape < _2,_2, Int<30>>>, Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>; using CLayout_64x256 = Layout<Shape <Shape < _4,_8, _4>,Shape < _2,_2, _32>>, Stride<Stride<_128,_1,_16>,Stride<_64,_8,_512>>>; // Register source layout for 32-bit value types using ALayout_64x8 = Layout<Shape <Shape < _4,_8, _4>,Shape < _2, _2>>, Stride<Stride< _64,_1,_16>,Stride< _8,_256>>>; // Register source layout for 16-bit value types using ALayout_64x16 = CLayout_64x16; // Register source layout for 8-bit value types using ALayout_64x32 = Layout<Shape <Shape < _4,_8, _4>,Shape < _4,_2, _2>>, Stride<Stride<_256,_1,_16>,Stride<_64,_8,_1024>>>; // Shared memory source layouts for any value type template <int M, int K> using ABLayout = Layout<Shape <_128,Shape <Int<M>,Int<K>>>, Stride< _0,Stride< _1,Int<M>>>>; } // namespace GMMA template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_8,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 8, 16>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_8,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 8, 16>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_16,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 16, 16>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_16,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 16, 16>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; 
//////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_32,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 32, 16>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_32,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 32, 16>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_48,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 48, 16>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_48,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 48, 16>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_64,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 64, 16>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; 
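// For example (sketch): building a TiledMMA from the 64x64x16 F16 SS atom above and issuing it on
// descriptor-backed A/B fragments. The Major choices and the surrounding tensors (tCrA, tCrB) are
// assumed, and the ScaleIn template parameters are assumed to default to ScaleIn::One.
//
//   auto   tiled_mma = make_tiled_mma(SM90_64x64x16_F16F16F16_SS<GMMA::Major::K, GMMA::Major::K>{});
//   auto   thr_mma   = tiled_mma.get_thread_slice(threadIdx.x);
//   Tensor accum     = partition_fragment_C(tiled_mma, Shape<_64,_64>{});   // half_t accumulators
//   gemm(tiled_mma, tCrA, tCrB, accum);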
//////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_64,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 64, 16>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_80,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 80, 16>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_80,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 80, 16>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_96,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 96, 16>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_96,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 96, 16>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if 
defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_112,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<112, 16>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_112,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<112, 16>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_128,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<128, 16>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_128,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<128, 16>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_144,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<144, 16>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if 
defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_144,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<144, 16>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_160,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<160, 16>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_160,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<160, 16>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_176,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<176, 16>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_176,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<176, 16>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif 
//////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_192,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<192, 16>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_192,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<192, 16>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_208,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<208, 16>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_208,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<208, 16>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_224,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<224, 16>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif 
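// Note: the N = 48, 80, 112, 144, 160, 176, 208, 224, 240 trait specializations in this file are
// guarded by CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED and are only visible when the including
// translation unit defines that macro, e.g. (assumed build flag placement)
//
//   nvcc ... -DCUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED ...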
//////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_224,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<224, 16>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_240,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<240, 16>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_240,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<240, 16>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x16_F16F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_256,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<256, 16>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x16_F16F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_256,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<256, 16>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; 
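// The *_RS variants above differ from *_SS only in the A operand: FrgTypeA is omitted, so A is
// read from registers (GMMA::ALayout_64x16) instead of through a shared-memory descriptor, while
// B still uses GMMA::smem_desc. A sketch of the A-side difference (thr_mma, sA assumed):
//
//   Tensor tCrA = thr_mma.partition_fragment_A(sA);   // half_t register fragment, filled by an explicit copy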
//////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_8,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 8, 16>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_8,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 8, 16>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_16,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 16, 16>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_16,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 16, 16>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_32,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 32, 16>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct 
MMA_Traits<SM90_64x32x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_32,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 32, 16>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_48,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 48, 16>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_48,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 48, 16>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_64,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 64, 16>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_64,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 64, 16>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using 
ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_80,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 80, 16>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_80,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 80, 16>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_96,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 96, 16>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_96,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 96, 16>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_112,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<112, 16>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; 
using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_112,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<112, 16>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_128,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<128, 16>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_128,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<128, 16>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_144,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<144, 16>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_144,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<144, 16>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using 
ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_160,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<160, 16>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_160,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<160, 16>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_176,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<176, 16>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_176,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<176, 16>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_192,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<192, 16>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using 
ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_192,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<192, 16>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_208,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<208, 16>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_208,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<208, 16>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_224,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<224, 16>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_224,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<224, 16>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; 
using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_240,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<240, 16>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_240,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<240, 16>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x16_F32F16F16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_256,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<256, 16>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x16_F32F16F16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_256,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<256, 16>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_8,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 8, 16>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = 
GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_8,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 8, 16>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_16,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 16, 16>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_16,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 16, 16>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_32,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 32, 16>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_32,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 32, 16>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_48,_16>; using ThrID = Layout<_128>; using ALayout = 
GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 48, 16>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_48,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 48, 16>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_64,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 64, 16>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_64,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 64, 16>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_80,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 80, 16>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_80,_16>; using ThrID = Layout<_128>; using ALayout = 
GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 80, 16>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_96,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout< 96, 16>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_96,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout< 96, 16>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_112,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<112, 16>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_112,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<112, 16>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_128,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 
16>; using BLayout = GMMA::ABLayout<128, 16>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_128,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<128, 16>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_144,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<144, 16>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_144,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<144, 16>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_160,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<160, 16>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_160,_16>; using ThrID = 
Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<160, 16>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_176,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<176, 16>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_176,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<176, 16>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_192,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<192, 16>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_192,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<192, 16>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = 
Shape<_64,_208,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<208, 16>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_208,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<208, 16>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_224,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<224, 16>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_224,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<224, 16>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_240,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<240, 16>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = 
bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_240,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<240, 16>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x16_F32BF16BF16_SS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<tnspA>; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_256,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 16>; using BLayout = GMMA::ABLayout<256, 16>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::Major tnspA, GMMA::Major tnspB, GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x16_F32BF16BF16_RS<tnspA, tnspB, scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = bfloat16_t; using ValTypeB = bfloat16_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<tnspB>; using Shape_MNK = Shape<_64,_256,_16>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x16; using BLayout = GMMA::ABLayout<256, 16>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout< 8, 8>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout< 8, 8>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout< 16, 8>; using 
CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout< 16, 8>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout< 32, 8>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout< 32, 8>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout< 48, 8>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout< 48, 8>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct 
MMA_Traits<SM90_64x64x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout< 64, 8>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout< 64, 8>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout< 80, 8>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout< 80, 8>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout< 96, 8>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using 
Shape_MNK = Shape<_64,_96,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout< 96, 8>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout<112, 8>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout<112, 8>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout<128, 8>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout<128, 8>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout<144, 8>; using CLayout = 
GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout<144, 8>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout<160, 8>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout<160, 8>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout<176, 8>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout<176, 8>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif 
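////////////////////////////////////////////////////////////////////////////////////////////////////

// Added commentary on the pattern shared by the MMA_Traits specializations in this file:
// "_SS" atoms source both A and B from shared memory, so FrgTypeA and FrgTypeB are GMMA
// shared-memory descriptors; "_RS" atoms source A from registers, so they omit FrgTypeA and use a
// register fragment layout (e.g. GMMA::ALayout_64x16) in place of GMMA::ABLayout for A. "_TN" atoms
// support only K-major A and B, which is why their descriptors are fixed to GMMA::Major::K instead
// of being template parameters. Shape_MNK is the instruction tile, ThrID is the single-warpgroup
// (128-thread) thread layout, and ALayout/BLayout/CLayout are the (thread, value) -> coordinate
// mappings that cute::TiledMMA uses to partition tensors. The accumulate_ member selects between
// D = A*B (GMMA::ScaleOut::Zero) and D = A*B + C (GMMA::ScaleOut::One).
//
// Illustrative usage sketch (an assumption added for documentation; the tensor names below are
// hypothetical and not part of this header):
//
//   using namespace cute;
//   // Build a warpgroup-wide tiled MMA from one of the operations whose traits are defined here.
//   auto tiled_mma = make_tiled_mma(
//       SM90_64x128x16_F32F16F16_SS<GMMA::Major::K, GMMA::Major::K>{});
//   // cute::gemm(tiled_mma, tCrA, tCrB, tCrC) then dispatches through MMA_Traits to issue the
//   // corresponding wgmma.mma_async instruction with the layouts declared above.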
//////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout<192, 8>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout<192, 8>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout<208, 8>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout<208, 8>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout<224, 8>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) 
template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout<224, 8>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout<240, 8>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout<240, 8>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x8_F32TF32TF32_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 8>; using BLayout = GMMA::ABLayout<256, 8>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x8_F32TF32TF32_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = tfloat32_t; using ValTypeB = tfloat32_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_8>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x8; using BLayout = GMMA::ABLayout<256, 8>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = 
GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA 
= int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = 
GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = 
GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 
32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32S8S8_SS_TN> { using 
ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32S8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32S8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = 
GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if 
defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> 
struct MMA_Traits<SM90_64x96x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32S8S8_RS_TN> { using 
ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif 
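// Note on the pattern above and below: the *_SS_TN traits describe warpgroup MMAs whose A and B
// operands are both read through shared-memory descriptors (FrgTypeA/FrgTypeB = GMMA::smem_desc),
// while the *_RS_TN traits omit FrgTypeA and use GMMA::ALayout_64x32, i.e. operand A is fed from
// registers and only B goes through a descriptor. The *_SATURATE variants select the saturating
// form of the underlying instruction; types and layouts are otherwise identical. Shapes wrapped in
// CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED are only available when that opt-in macro is defined.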
//////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; 
}; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32S8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32S8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; 
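// Illustrative usage sketch (comment only, not part of this header): these MMA_Traits
// specializations are normally consumed via cute::MMA_Atom and cute::make_tiled_mma rather than
// instantiated directly. A minimal example, assuming the CuTe headers are available and the
// kernel targets SM90:
//
//   #include <cute/tensor.hpp>
//   #include <cute/atom/mma_atom.hpp>
//
//   using namespace cute;
//
//   // One warpgroup (128 threads, matching ThrID = Layout<_128>) drives a single
//   // 64x64x32 s8 x s8 -> s32 GMMA with A and B sourced from shared memory (SS, K-major).
//   using TiledMma = decltype(make_tiled_mma(MMA_Atom<SM90_64x64x32_S32S8S8_SS_TN>{}));
//
//   // Per-thread partitioning then follows the usual CuTe pattern, e.g.
//   //   auto thr_mma = TiledMma{}.get_thread_slice(threadIdx.x);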
//////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; 
using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; 
using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = 
GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct 
MMA_Traits<SM90_64x192x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = 
Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32S8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32S8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; 
using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = 
Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = 
GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using 
BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = 
GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using 
FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32S8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32S8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = int8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = 
GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct 
MMA_Traits<SM90_64x64x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 
64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct 
MMA_Traits<SM90_64x160x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; 
using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif 
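// Editor's note (descriptive comment added for readability; not in the original source): every
// specialization in this block follows the same pattern. ValTypeA/ValTypeB give the 8-bit integer
// operand types and ValTypeC/ValTypeD the int32_t accumulator/result; FrgTypeA/FrgTypeB =
// GMMA::smem_desc<GMMA::Major::K> mark operands that the warpgroup MMA reads through K-major
// shared-memory descriptors; Shape_MNK is the 64xNx32 instruction tile; ThrID = Layout<_128> spans
// the 128 threads of one warpgroup; and ALayout/BLayout/CLayout describe how operand and
// accumulator elements are distributed across those threads. The *_SATURATE ops select what appears
// to be the saturating (satfinite) form of the same instruction, and the
// CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED guards wrap the extended N sizes (48, 80, 112, ...).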
//////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32U8S8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32U8S8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; 
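// Editor's note: the _SS_TN specializations declare both FrgTypeA and FrgTypeB as GMMA::smem_desc,
// i.e. both operands are fetched through shared-memory descriptors, while the _RS_TN
// specializations drop FrgTypeA and use ALayout = GMMA::ALayout_64x32, so the A fragment is held in
// registers and only B goes through a descriptor. accumulate_ defaults to GMMA::ScaleOut::One
// (accumulate into the existing C values); callers can set it to GMMA::ScaleOut::Zero when the
// first MMA of a K loop should overwrite the accumulator instead.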
//////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> 
struct MMA_Traits<SM90_64x64x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> 
struct MMA_Traits<SM90_64x112x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if 
defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif 
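// Usage sketch (editor addition; illustrative only and not part of this header). An op whose
// traits are specialized here is normally consumed through cute::MMA_Atom / cute::make_tiled_mma;
// the snippet below assumes the usual CuTe headers and a device-side context.
//
//   #include <cute/tensor.hpp>
//   #include <cute/atom/mma_atom.hpp>
//   using namespace cute;
//
//   // 64x128x32 uint8_t x int8_t -> int32_t warpgroup MMA, both operands staged in shared memory.
//   auto tiled_mma = make_tiled_mma(SM90_64x128x32_S32U8S8_SS_TN{});
//   auto thr_mma   = tiled_mma.get_thread_slice(threadIdx.x);
//
//   // Partitioning of shared-memory tensors sA/sB and the accumulator then follows the
//   // ALayout/BLayout/CLayout declared in the matching MMA_Traits specialization, e.g.
//   //   Tensor tCsA = thr_mma.partition_A(sA);
//   //   Tensor tCrC = thr_mma.partition_fragment_C(gC);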
//////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32U8S8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = 
GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32U8S8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = int8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = 
GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using 
ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = 
GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = 
int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = 
GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif 
//////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32U8U8_SS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32U8U8_SS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x8x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x16x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct 
MMA_Traits<SM90_64x32x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x32x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x48x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x64x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32U8U8_RS_TN> { using ValTypeD = 
int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x80x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x96x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x112x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; 
using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x128x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x144x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x160x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct 
MMA_Traits<SM90_64x176x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x176x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x192x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x208x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if 
defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x224x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <> struct MMA_Traits<SM90_64x240x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32U8U8_RS_TN> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <> struct MMA_Traits<SM90_64x256x32_S32U8U8_RS_TN_SATURATE> { using ValTypeD = int32_t; using ValTypeA = uint8_t; using ValTypeB = uint8_t; using ValTypeC = int32_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; 
//////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; 
using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = 
GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif 
//////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct 
MMA_Traits<SM90_64x80x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using 
FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; 
using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif 
//////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif 
//////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; 
#endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F16E4M3E4M3_SS_TN<scaleA, 
scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct 
MMA_Traits<SM90_64x224x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct 
MMA_Traits<SM90_64x240x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F16E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F16E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F32E4M3E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F32E4M3E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = 
GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; 
//////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = 
GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; 
using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif 
//////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct 
MMA_Traits<SM90_64x96x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { 
using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; 
using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; 
using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 
64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if 
defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif 
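// Note on the two flavors of these FP8 GMMA traits: the "_SS_" ops read both A and B from shared
// memory, so FrgTypeA and FrgTypeB are GMMA::smem_desc<GMMA::Major::K> matrix descriptors (K-major,
// matching the _TN convention); the "_RS_" ops read A from registers instead, which is why they
// carry no FrgTypeA descriptor and use ALayout = GMMA::ALayout_64x32 to describe the A fragment
// held across the 128 threads.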
//////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif 
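// The extended N tile sizes (48, 80, 112, 144, 160, 176, 208, 224, 240) are opt-in and only
// compiled when CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED is defined. The accumulate_ member is the
// wgmma scale-D flag: GMMA::ScaleOut::One gives D = A*B + C, while GMMA::ScaleOut::Zero discards
// the previous accumulator contents (D = A*B), which lets the first MMA of a K-loop initialize the
// accumulators without a separate clear.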
//////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F16E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F16E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F32E4M3E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct 
MMA_Traits<SM90_64x256x32_F32E4M3E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e4m3_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = 
GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct 
MMA_Traits<SM90_64x32x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using 
ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = 
GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; 
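// Illustrative usage (a sketch, not part of this header): downstream code consumes these traits by
// wrapping the corresponding op in a cute::MMA_Atom / TiledMMA, assuming a Hopper (sm_90a) build:
//
//   #include <cute/tensor.hpp>
//   #include <cute/atom/mma_atom.hpp>
//
//   // FP8 wgmma: A in e5m2, B in e4m3, FP32 accumulation, both operands staged in shared memory.
//   using MmaOp    = cute::SM90_64x64x32_F32E5M2E4M3_SS_TN<cute::GMMA::ScaleIn::One,
//                                                          cute::GMMA::ScaleIn::One>;
//   using TiledMma = decltype(cute::make_tiled_mma(MmaOp{}));
//
// The MMA_Traits specializations in this file supply the value types, Shape_MNK, thread ID layout,
// and A/B/C layouts that the MMA_Atom uses to partition tensors for this op.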
//////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if 
defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = 
half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using 
ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = 
GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; 
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = 
GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut 
accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F16E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F16E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F32E5M2E4M3_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template 
<GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F32E5M2E4M3_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e4m3_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x8x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_8,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 8, 32>; using CLayout = GMMA::CLayout_64x8; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; 
using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x16x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_16,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 16, 32>; using CLayout = GMMA::CLayout_64x16; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn 
scaleB> struct MMA_Traits<SM90_64x32x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x32x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_32,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 32, 32>; using CLayout = GMMA::CLayout_64x32; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x48x32_F32E5M2E5M2_RS_TN<scaleA, 
scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_48,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 48, 32>; using CLayout = GMMA::CLayout_64x48; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x64x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_64,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 64, 32>; using CLayout = GMMA::CLayout_64x64; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using 
ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x80x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_80,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 80, 32>; using CLayout = GMMA::CLayout_64x80; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; 
//////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x96x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_96,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout< 96, 32>; using CLayout = GMMA::CLayout_64x96; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if 
defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x112x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_112,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<112, 32>; using CLayout = GMMA::CLayout_64x112; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x128x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_128,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<128, 32>; using CLayout = GMMA::CLayout_64x128; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = 
half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x144x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_144,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<144, 32>; using CLayout = GMMA::CLayout_64x144; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using 
ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x160x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_160,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<160, 32>; using CLayout = GMMA::CLayout_64x160; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = 
GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x176x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_176,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<176, 32>; using CLayout = GMMA::CLayout_64x176; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x192x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_192,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<192, 32>; using CLayout = GMMA::CLayout_64x192; 
GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x208x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_208,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<208, 32>; using CLayout = GMMA::CLayout_64x208; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = 
GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x224x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_224,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<224, 32>; using CLayout = GMMA::CLayout_64x224; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut 
accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED) template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x240x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_240,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<240, 32>; using CLayout = GMMA::CLayout_64x240; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F16E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F16E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = half_t; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = half_t; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F32E5M2E5M2_SS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeA = GMMA::smem_desc<GMMA::Major::K>; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ABLayout< 64, 32>; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template 
<GMMA::ScaleIn scaleA, GMMA::ScaleIn scaleB> struct MMA_Traits<SM90_64x256x32_F32E5M2E5M2_RS_TN<scaleA, scaleB>> { using ValTypeD = float; using ValTypeA = float_e5m2_t; using ValTypeB = float_e5m2_t; using ValTypeC = float; using FrgTypeB = GMMA::smem_desc<GMMA::Major::K>; using Shape_MNK = Shape<_64,_256,_32>; using ThrID = Layout<_128>; using ALayout = GMMA::ALayout_64x32; using BLayout = GMMA::ABLayout<256, 32>; using CLayout = GMMA::CLayout_64x256; GMMA::ScaleOut accumulate_ = GMMA::ScaleOut::One; }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // end namespace cute
include/cute/atom/mma_traits_sm90_gmma.hpp/0
{ "file_path": "include/cute/atom/mma_traits_sm90_gmma.hpp", "repo_id": "include", "token_count": 189857 }
12
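The MMA_Traits specializations above are not meant to be invoked directly; they feed CuTe's MMA_Atom / TiledMMA machinery. Below is a minimal host-side sketch of that wiring under the usual CuTe workflow. The chosen 64x64x32 e5m2 x e5m2 SS_TN op is one of the specializations defined above, while the `make_tiled_mma` composition and the partitioning hint in the trailing comment are assumptions about typical usage, not part of this file.

// Sketch only: compose one traits-backed GMMA op into a TiledMMA.
#include <cute/tensor.hpp>
#include <cute/atom/mma_atom.hpp>

using namespace cute;

// 64x64x32 warpgroup MMA: A = e5m2, B = e5m2, C/D = float, both operands
// sourced from shared memory (SS), TN layout, no operand negation.
using Fp8Op = SM90_64x64x32_F32E5M2E5M2_SS_TN<GMMA::ScaleIn::One,
                                              GMMA::ScaleIn::One>;

// MMA_Traits<Fp8Op> (defined above) supplies Shape_MNK, ThrID and the A/B/C
// layouts consumed by MMA_Atom; a single warpgroup (128 threads) executes it.
using TiledMma = decltype(make_tiled_mma(MMA_Atom<Fp8Op>{},
                                         Layout<Shape<_1,_1,_1>>{}));

// In a kernel, each thread would take its slice before calling cute::gemm:
//   auto thr_mma = TiledMma{}.get_thread_slice(threadIdx.x);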
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cstdint> #else #include <cstdint> #endif #include <cutlass/numeric_types.h> namespace cute { // // Signed integers // using int2_t = cutlass::int2b_t; using int4_t = cutlass::int4b_t; using CUTE_STL_NAMESPACE::int8_t; using CUTE_STL_NAMESPACE::int16_t; using CUTE_STL_NAMESPACE::int32_t; using CUTE_STL_NAMESPACE::int64_t; template <int N> struct int_bit; template <> struct int_bit< 2> { using type = cutlass::int2b_t; }; template <> struct int_bit< 4> { using type = cutlass::int4b_t; }; template <> struct int_bit< 8> { using type = int8_t; }; template <> struct int_bit< 16> { using type = int16_t; }; template <> struct int_bit< 32> { using type = int32_t; }; template <> struct int_bit< 64> { using type = int64_t; }; template <int N> using int_bit_t = typename int_bit<N>::type; template <int N> using int_byte = int_bit<8*N>; template <int N> using int_byte_t = typename int_byte<N>::type; // // Unsigned integers // using uint1_t = cutlass::uint1b_t; using uint2_t = cutlass::uint2b_t; using uint4_t = cutlass::uint4b_t; using CUTE_STL_NAMESPACE::uint8_t; using CUTE_STL_NAMESPACE::uint16_t; using CUTE_STL_NAMESPACE::uint32_t; using CUTE_STL_NAMESPACE::uint64_t; using cutlass::uint128_t; template <int N> struct uint_bit; template <> struct uint_bit< 1> { using type = cutlass::uint1b_t; }; template <> struct uint_bit< 2> { using type = cutlass::uint2b_t; }; template <> struct uint_bit< 4> { using type = cutlass::uint4b_t; }; template <> struct uint_bit< 8> { using type = uint8_t; }; template <> struct uint_bit< 16> { using type = uint16_t; }; template <> struct uint_bit< 32> { using type = uint32_t; }; template <> struct uint_bit< 64> { using type = uint64_t; }; template <> 
struct uint_bit<128> { using type = cutlass::uint128_t; }; template <int N> using uint_bit_t = typename uint_bit<N>::type; template <int N> using uint_byte = uint_bit<8*N>; template <int N> using uint_byte_t = typename uint_byte<N>::type; } // namespace cute
include/cute/numeric/int.hpp/0
{ "file_path": "include/cute/numeric/int.hpp", "repo_id": "include", "token_count": 1299 }
13
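A small compile-time sketch of how the width aliases above resolve; the checks simply mirror the mappings declared in this file and assume std::is_same is sufficient to compare the resulting types on the host.

// Sketch only: each bit/byte width maps to the matching CUTLASS or standard
// integer type, the kind of lookup CuTe performs when it needs a raw storage
// word of a given width.
#include <cstdint>
#include <type_traits>
#include <cute/numeric/int.hpp>

static_assert(std::is_same<cute::uint_bit_t<8>,   std::uint8_t>::value, "");
static_assert(std::is_same<cute::uint_bit_t<128>, cutlass::uint128_t>::value, "");
static_assert(std::is_same<cute::uint_byte_t<4>,  std::uint32_t>::value, "");
static_assert(std::is_same<cute::int_bit_t<4>,    cutlass::int4b_t>::value, "");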
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Matrix multiply */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "mma.h" #include "cutlass/layout/matrix.h" #include "cutlass/numeric_types.h" #if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1)) #define CUTLASS_ARCH_MMA_SM70_SUPPORTED #endif #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)) #if ((__CUDACC_VER_MAJOR__ > 10) || (__CUDACC_VER_MAJOR__ == 10 &&__CUDACC_VER_MINOR__ >= 1)) #define CUTLASS_ARCH_MMA_SM70_ENABLED #endif #endif ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace arch { ///////////////////////////////////////////////////////////////////////////////////////////////// // // Matrix multiply accumulate 884 - FP16 accumulation // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: F16 = F16 * F16 + F16 template <> struct Mma< gemm::GemmShape<8,8,4>, 8, half_t, layout::ColumnMajor, half_t, layout::ColumnMajor, half_t, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::ColumnMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<half_t, 4>; using ElementC = half_t; using LayoutC = layout::RowMajor; using FragmentC = Array<half_t, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); unsigned const *C = reinterpret_cast<unsigned const *>(&c); unsigned *D = reinterpret_cast<unsigned *>(&d); asm volatile("mma.sync.aligned.m8n8k4.col.col.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; /// Matrix multiply-add operation: F16 = F16 * F16 + F16 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::ColumnMajor, half_t, layout::RowMajor, half_t, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::ColumnMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::RowMajor; using FragmentB = Array<half_t, 4>; using ElementC = half_t; using LayoutC = layout::RowMajor; using FragmentC = Array<half_t, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); unsigned const *C = reinterpret_cast<unsigned const *>(&c); unsigned *D = reinterpret_cast<unsigned *>(&d); asm volatile("mma.sync.aligned.m8n8k4.col.row.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]) ); #else assert(0); #if 
defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; /// Matrix multiply-add operation: F16 = F16 * F16 + F16 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::RowMajor, half_t, layout::ColumnMajor, half_t, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::RowMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<half_t, 4>; using ElementC = half_t; using LayoutC = layout::RowMajor; using FragmentC = Array<half_t, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); unsigned const *C = reinterpret_cast<unsigned const *>(&c); unsigned *D = reinterpret_cast<unsigned *>(&d); asm volatile("mma.sync.aligned.m8n8k4.row.col.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; /// Matrix multiply-add operation: F16 = F16 * F16 + F16 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::RowMajor, half_t, layout::RowMajor, half_t, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::RowMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::RowMajor; using FragmentB = Array<half_t, 4>; using ElementC = half_t; using LayoutC = layout::RowMajor; using FragmentC = Array<half_t, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); unsigned const *C = reinterpret_cast<unsigned const *>(&c); unsigned *D = reinterpret_cast<unsigned *>(&d); asm volatile("mma.sync.aligned.m8n8k4.row.row.f16.f16.f16.f16 {%0,%1,%2,%3}, {%4,%5}, {%6,%7}, {%8,%9,%10,%11};\n" : "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Matrix multiply accumulate 884 - FP32 accumulation // ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: F32 = F16 * F16 + F32 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::ColumnMajor, half_t, layout::ColumnMajor, float, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::ColumnMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<half_t, 4>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 8>; using Operator = OpMultiplyAdd; 
using ArchTag = arch::Sm70; /// Multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); asm volatile("mma.sync.aligned.m8n8k4.col.col.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, " "{%12,%13,%14,%15,%16,%17,%18,%19};\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]), "=f"(D[4]), "=f"(D[5]), "=f"(D[6]), "=f"(D[7]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "f"(C[4]), "f"(C[5]), "f"(C[6]), "f"(C[7]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; /// Matrix multiply-add operation: F32 = F16 * F16 + F32 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::ColumnMajor, half_t, layout::RowMajor, float, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::ColumnMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::RowMajor; using FragmentB = Array<half_t, 4>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; /// Multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); asm volatile("mma.sync.aligned.m8n8k4.col.row.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, " "{%12,%13,%14,%15,%16,%17,%18,%19};\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]), "=f"(D[4]), "=f"(D[5]), "=f"(D[6]), "=f"(D[7]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "f"(C[4]), "f"(C[5]), "f"(C[6]), "f"(C[7]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; /// Matrix multiply-add operation: F32 = F16 * F16 + F32 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::RowMajor, half_t, layout::ColumnMajor, float, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::RowMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::ColumnMajor; using FragmentB = Array<half_t, 4>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; /// Multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); asm volatile("mma.sync.aligned.m8n8k4.row.col.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, " "{%12,%13,%14,%15,%16,%17,%18,%19};\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]), "=f"(D[4]), 
"=f"(D[5]), "=f"(D[6]), "=f"(D[7]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "f"(C[4]), "f"(C[5]), "f"(C[6]), "f"(C[7]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; /// Matrix multiply-add operation: F32 = F16 * F16 + F32 template <> struct Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, layout::RowMajor, half_t, layout::RowMajor, float, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<8, 8, 4>; using ElementA = half_t; using LayoutA = layout::RowMajor; using FragmentA = Array<half_t, 4>; using ElementB = half_t; using LayoutB = layout::RowMajor; using FragmentB = Array<half_t, 4>; using ElementC = float; using LayoutC = layout::RowMajor; using FragmentC = Array<float, 8>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm70; /// Multiply-add CUTLASS_HOST_DEVICE void operator()( FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c ) { #if defined(CUTLASS_ARCH_MMA_SM70_ENABLED) unsigned const *A = reinterpret_cast<unsigned const *>(&a); unsigned const *B = reinterpret_cast<unsigned const *>(&b); float const *C = reinterpret_cast<float const *>(&c); float *D = reinterpret_cast<float *>(&d); asm volatile("mma.sync.aligned.m8n8k4.row.row.f32.f16.f16.f32 {%0,%1,%2,%3,%4,%5,%6,%7}, {%8,%9}, {%10,%11}, " "{%12,%13,%14,%15,%16,%17,%18,%19};\n" : "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3]), "=f"(D[4]), "=f"(D[5]), "=f"(D[6]), "=f"(D[7]) : "r"(A[0]), "r"(A[1]), "r"(B[0]), "r"(B[1]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "f"(C[4]), "f"(C[5]), "f"(C[6]), "f"(C[7]) ); #else assert(0); #if defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation specialized for the entire warp template < typename LayoutA, typename LayoutB, typename ElementC, typename LayoutC, typename Operator > struct Mma< gemm::GemmShape<16, 16, 4>, 32, half_t, LayoutA, half_t, LayoutB, ElementC, LayoutC, Operator > : public Mma< gemm::GemmShape<8, 8, 4>, 8, half_t, LayoutA, half_t, LayoutB, ElementC, LayoutC, Operator> { using Shape = gemm::GemmShape<16, 16, 4>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace arch } // namespace cutlass
include/cutlass/arch/mma_sm70.h/0
{ "file_path": "include/cutlass/arch/mma_sm70.h", "repo_id": "include", "token_count": 7247 }
14
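/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch for the m8n8k4 Mma atoms defined in mma_sm70.h above (not part of the
// original header). The kernel name and the uniform fragment values are hypothetical, and the
// per-lane fragment distribution mandated by the mma.sync.m8n8k4 thread mapping is omitted;
// this must be compiled for SM70 so that CUTLASS_ARCH_MMA_SM70_ENABLED is defined.

#include "cutlass/arch/mma_sm70.h"

__global__ void example_mma_sm70(float *out) {

  // F32 = F16 * F16 + F32 atom with row-major A and column-major B, as specialized above.
  using MmaOp = cutlass::arch::Mma<
      cutlass::gemm::GemmShape<8, 8, 4>, 8,
      cutlass::half_t, cutlass::layout::RowMajor,
      cutlass::half_t, cutlass::layout::ColumnMajor,
      float, cutlass::layout::RowMajor,
      cutlass::arch::OpMultiplyAdd>;

  MmaOp::FragmentA frag_A;  // Array<half_t, 4> owned by this lane
  MmaOp::FragmentB frag_B;  // Array<half_t, 4> owned by this lane
  MmaOp::FragmentC accum;   // Array<float, 8> owned by this lane

  frag_A.fill(cutlass::half_t(1));
  frag_B.fill(cutlass::half_t(1));
  accum.clear();

  MmaOp mma;
  mma(accum, frag_A, frag_B, accum);  // D = A * B + C via mma.sync.aligned.m8n8k4

  out[threadIdx.x] = accum[0];
}
/////////////////////////////////////////////////////////////////////////////////////////////////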
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This file contains definitions and utility functions for describing convolution problem sizes. Conv2dProblem description: activation (NHWC), filter (KRSC), output (NPQK), padding (pad_h, pad_w), stride (stride_h, stride_w), dilation (dilation_h, dilation_w).
Free functions to map: Map tensor extents (Conv2d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_extent(ConvolutionOperator) Map tensor sizes (Conv2d -> ImplicitGemm) : implicit_gemm_tensor_[a|b|c]_size(ConvolutionOperator) Map tensor problem sizes (Conv2d -> ImplicitGemm): implicit_gemm_problem_size(ConvolutionOperator) */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/tensor_coord.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm_enumerated_types.h" #include "cutlass/matrix_coord.h" #include "cutlass/conv/convolution.h" #include "cutlass/functional.h" namespace cutlass { namespace conv { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Problem size structure struct Conv2dProblemSize { // Conv2d strictly problem size parameters int N, H, W, C, P, Q, K, R, S; int pad_h, pad_w; int stride_h, stride_w; int dilation_h, dilation_w; Mode mode; // Conv2d implementation-related parameters int split_k_slices; int groups; // // Methods // public: CUTLASS_HOST_DEVICE Conv2dProblemSize(): N(0), H(0), W(0), C(0), P(0), Q(0), K(0), R(0), S(0), pad_h(0), pad_w(0), stride_h(1), stride_w(1), dilation_h(1), dilation_w(1), mode(Mode::kConvolution), split_k_slices(1), groups(1) { } /// Constructor for default padding, stride, dilation, and split-K CUTLASS_HOST_DEVICE Conv2dProblemSize( int N, int H, int W, int C, int P, int Q, int K, int R, int S, Mode mode ): N(N), H(H), W(W), C(C), P(P), Q(Q), K(K), R(R), S(S), pad_h(R / 2), pad_w(S / 2), stride_h(1), stride_w(1), dilation_h(1), dilation_w(1), mode(mode), split_k_slices(1), groups (1) { } /// Constructor CUTLASS_HOST_DEVICE Conv2dProblemSize( int N, int H, int W, int C, int K, int R, int S, int P, int Q, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, Mode mode, int split_k_slices = 1, int groups = 1 ): N(N), H(H), W(W), C(C), P(P), Q(Q), K(K), R(R), S(S), pad_h(pad_h), pad_w(pad_w), stride_h(stride_h), stride_w(stride_w), dilation_h(dilation_h), dilation_w(dilation_w), mode(mode), split_k_slices(split_k_slices), groups (groups) { } /// Constructs convolution problem size from cutlass Tensor4DCoord and MatrixCoord // set user-defined output size and sets P and Q (include all data members in ctor) CUTLASS_HOST_DEVICE Conv2dProblemSize( cutlass::Tensor4DCoord input_size, // NHWC cutlass::Tensor4DCoord filter_size, // KRSC cutlass::Tensor4DCoord padding, // pad_h, _, pad_w, _ cutlass::MatrixCoord stride, // stride_h, stride_w cutlass::MatrixCoord dilation, // dilation_h, dilation_w cutlass::Tensor4DCoord output_size, // NPQK cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation, int split_k_slices = 1, int groups = 1 ): N(input_size.n()), H(input_size.h()), W(input_size.w()), C(input_size.c()), P(output_size.h()), Q(output_size.w()), K(filter_size.n()), R(filter_size.h()), S(filter_size.w()), pad_h(padding[0]), pad_w(padding[2]), stride_h(stride.row()), stride_w(stride.column()), dilation_h(dilation.row()), dilation_w(dilation.column()), mode(mode), split_k_slices(split_k_slices), groups(groups) {} /// Constructs convolution problem size from cutlass Tensor4DCoord and MatrixCoord // computes output size and sets P and Q (skip output from ctor arguments) CUTLASS_HOST_DEVICE Conv2dProblemSize( cutlass::Tensor4DCoord input_size, // NHWC cutlass::Tensor4DCoord filter_size, // KRSC cutlass::Tensor4DCoord padding, // pad_h, upper_pad_h, pad_w, upper_pad_w cutlass::MatrixCoord stride, // stride_h, stride_w cutlass::MatrixCoord dilation, // 
dilation_h, dilation_w cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation, int split_k_slices = 1, int groups = 1 ): N(input_size.n()), H(input_size.h()), W(input_size.w()), C(input_size.c()), K(filter_size.n()), R(filter_size.h()), S(filter_size.w()), pad_h(padding[0]), pad_w(padding[2]), stride_h(stride.row()), stride_w(stride.column()), dilation_h(dilation.row()), dilation_w(dilation.column()), mode(mode), split_k_slices(split_k_slices), groups(groups) { // set output P and Q P = ((H + pad_h + padding[1] - R * dilation_h) / stride_h) + 1; Q = ((W + pad_w + padding[3] - S * dilation_w) / stride_w) + 1; } /// Constructs convolution problem size from cutlass Tensor4DCoord and MatrixCoord // set user-defined output size and sets P and Q (skip padding, striding, and dilation) CUTLASS_HOST_DEVICE Conv2dProblemSize( cutlass::Tensor4DCoord input_size, // NHWC cutlass::Tensor4DCoord filter_size, // KRSC cutlass::Tensor4DCoord output_size, // NPQK cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation, int split_k_slices = 1, int groups = 1 ): N(input_size.n()), H(input_size.h()), W(input_size.w()), C(input_size.c()), P(output_size.h()), Q(output_size.w()), K(filter_size.n()), R(filter_size.h()), S(filter_size.w()), pad_h(R / 2), pad_w(S / 2), stride_h(1), stride_w(1), dilation_h(1), dilation_w(1), mode(mode), split_k_slices(split_k_slices), groups(groups) {} // Reset convolution mode in the problem CUTLASS_HOST_DEVICE Conv2dProblemSize reset_mode(cutlass::conv::Mode mode_) { Conv2dProblemSize tmp(*this); tmp.mode = mode_; return tmp; } // Reset split-K slices in the problem CUTLASS_HOST_DEVICE Conv2dProblemSize reset_split_k_slices(int split_k_slices_) { Conv2dProblemSize tmp(*this); tmp.split_k_slices = split_k_slices_; return tmp; } /// Equality operator (ignores mode and split_k_slices) CUTLASS_HOST_DEVICE bool operator==(Conv2dProblemSize const &conv) const { return ( (N == conv.N) && (H == conv.H) && (W == conv.W) && (C == conv.C) && (K == conv.K) && (R == conv.R) && (S == conv.S) && (P == conv.P) && (Q == conv.Q) && (pad_h == conv.pad_h) && (pad_w == conv.pad_w) && (stride_h == conv.stride_h) && (stride_w == conv.stride_w) && (dilation_h == conv.dilation_h) && (dilation_w == conv.dilation_w) ); } /// Inequality operator CUTLASS_HOST_DEVICE bool operator!=(Conv2dProblemSize const &rhs) const { return !(*this == rhs); } /// Returns activation extent as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord activation_extent() const { return cutlass::Tensor4DCoord ({N, H, W, C}); } /// Returns filter extent as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord filter_extent(bool is_deconv = false) const { return is_deconv ?
cutlass::Tensor4DCoord ({C, R, S, K / groups}) : cutlass::Tensor4DCoord ({K, R, S, C / groups}); } /// Returns output extent as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord output_extent() const { return cutlass::Tensor4DCoord ({N, P, Q, K}); } /// Returns activation size in number of elements CUTLASS_HOST_DEVICE int64_t activation_size() const { return (N * H * W * C); } /// Returns filter size in number of elements CUTLASS_HOST_DEVICE int64_t filter_size() const { return (K * R * S * C / groups); } /// Returns output size in number of elements CUTLASS_HOST_DEVICE int64_t output_size() const { return (N * P * Q * K); } /// Returns padding as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord padding() const { return cutlass::Tensor4DCoord ({pad_h, pad_h, pad_w, pad_w}); } /// Returns stride as MatrixCoord CUTLASS_HOST_DEVICE cutlass::MatrixCoord stride() const { return cutlass::MatrixCoord ({stride_h, stride_w}); } /// Returns dilation as MatrixCoord CUTLASS_HOST_DEVICE cutlass::MatrixCoord dilation() const { return cutlass::MatrixCoord ({dilation_h, dilation_w}); } ///////////////////////////////////////////////////////////////// // Methods used for strided dgrad implementation ///////////////////////////////////////////////////////////////// /// Number of filter r positions to accumulate in gemm-k dim CUTLASS_HOST_DEVICE int num_gemm_k_filter_r(int r) const { return ((R - r + stride_h - 1) / stride_h); } /// Number of filter s positions to accumulate in gemm-k dim CUTLASS_HOST_DEVICE int num_gemm_k_filter_s(int s) const { return ((S - s + stride_w - 1) / stride_w); } /// Number of filter positions to accumulate in gemm-k dim CUTLASS_HOST_DEVICE int num_gemm_k_filter_positions(int r, int s) const { return num_gemm_k_filter_r(r) * num_gemm_k_filter_s(s); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // ImplicitGemm helper functions // //////////////////////////////////////////////////////////////////////////////////////////////////// /// Determine the problem size of the implicit GEMM operation CUTLASS_HOST_DEVICE cutlass::gemm::GemmCoord implicit_gemm_problem_size( Operator conv_operator, Conv2dProblemSize const &problem_size) { // Compute problem size switch (conv_operator) { case Operator::kFprop: return gemm::GemmCoord( problem_size.N * problem_size.P * problem_size.Q, problem_size.K, problem_size.R * problem_size.S * problem_size.C / problem_size.groups ); case Operator::kDeconv: case Operator::kDgrad: return gemm::GemmCoord( problem_size.N * problem_size.H * problem_size.W, problem_size.C, problem_size.R * problem_size.S * problem_size.K ); case Operator::kWgrad: return gemm::GemmCoord( problem_size.K, problem_size.R * problem_size.S * problem_size.C, problem_size.N * problem_size.P * problem_size.Q ); default: break; } return gemm::GemmCoord(); } // Determine the number of gemm_k iterations for conv2d problem using implicit gemm algorithm CUTLASS_HOST_DEVICE int implicit_gemm_k_iterations( Operator conv_operator, int threadblock_K, Conv2dProblemSize const &problem_size, IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic, GroupMode group_mode = GroupMode::kNone, int threadblock_N = 0) { int iterations = 0; if (group_mode == GroupMode::kNone) { if (algorithm == IteratorAlgorithm::kFixedChannels) { int positions_per_iteration = threadblock_K / problem_size.C; switch (conv_operator) { case Operator::kFprop: iterations = (problem_size.R * problem_size.S + positions_per_iteration - 1 ) / 
positions_per_iteration; break; default: break; } } else if (algorithm == IteratorAlgorithm::kFewChannels) { switch (conv_operator) { case Operator::kFprop: iterations = (problem_size.R * problem_size.S * problem_size.C + threadblock_K - 1 ) / threadblock_K; break; default: break; } } else { int elements_per_split_k_slice = 0; switch (conv_operator) { case Operator::kFprop: elements_per_split_k_slice = (problem_size.C + problem_size.split_k_slices - 1) / problem_size.split_k_slices; iterations = problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K); break; case Operator::kDeconv: case Operator::kDgrad: elements_per_split_k_slice = (problem_size.K + problem_size.split_k_slices - 1) / problem_size.split_k_slices; iterations = problem_size.R * problem_size.S * ((elements_per_split_k_slice + threadblock_K - 1) / threadblock_K); break; case Operator::kWgrad: elements_per_split_k_slice = (problem_size.N * problem_size.P * problem_size.Q + problem_size.split_k_slices - 1) / problem_size.split_k_slices; iterations = (elements_per_split_k_slice + threadblock_K - 1) / threadblock_K; break; default: break; } } } else if (group_mode == GroupMode::kDepthwise) { int channels_per_cta = threadblock_N; if (algorithm == IteratorAlgorithm::kAnalytic) { switch (conv_operator) { case Operator::kFprop: iterations = problem_size.R * problem_size.S * ((channels_per_cta + threadblock_K - 1) / threadblock_K); break; default: break; } } } else { // Group conv int channels_per_group = problem_size.C / problem_size.groups; int k_per_group = problem_size.K / problem_size.groups; if (algorithm == IteratorAlgorithm::kAnalytic) { switch (conv_operator) { case Operator::kFprop: iterations = problem_size.R * problem_size.S * ((channels_per_group + threadblock_K - 1) / threadblock_K); // In group conv, if k_per_group < threadblock_N, one Threadblock will calculate multiple groups if (problem_size.groups != 1) { if (k_per_group < threadblock_N) { iterations *= threadblock_N / k_per_group; } } break; default: break; } } else if (algorithm == IteratorAlgorithm::kOptimized) { // Current optimized iterator only support GroupMode::kSingleGroup if (group_mode == GroupMode::kSingleGroup) { switch (conv_operator) { case Operator::kFprop: iterations = problem_size.R * problem_size.S * ((channels_per_group + threadblock_K - 1) / threadblock_K); break; default: break; } } } } return iterations; } template <int N = 1, int Output_P = 1, int Output_Q = 1> CUTLASS_HOST_DEVICE int depthwise_gemm_k_iterations( Operator conv_operator, int threadblock_K, Conv2dProblemSize const &problem_size, IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic, GroupMode group_mode = GroupMode::kNone, int threadblock_N = 0) { int n = problem_size.N; int p = (problem_size.P + Output_P - 1) / Output_P; int q = (problem_size.Q + Output_Q - 1) / Output_Q; int iterations = (n * p * q + problem_size.split_k_slices - 1) / problem_size.split_k_slices; return iterations; } CUTLASS_HOST_DEVICE int implicit_gemm_k_iterations_per_channel( Operator conv_operator, Conv2dProblemSize const &problem_size, IteratorAlgorithm algorithm = IteratorAlgorithm::kAnalytic) { int iterations = 0; //0 means not applicable if (algorithm == IteratorAlgorithm::kAnalytic || algorithm == IteratorAlgorithm::kOptimized) { switch (conv_operator) { case Operator::kFprop: iterations = problem_size.R * problem_size.S; break; case Operator::kDeconv: case Operator::kDgrad: iterations = problem_size.R * problem_size.S; break; default: break; } } 
return iterations; } //////////////////////////////////////////////////////////////////////////////// // Mapping function (ImplicitGemm A, B, C -> Conv Activation, Filter, Output) //////////////////////////////////////////////////////////////////////////////// /// Returns ImplicitGemm tensor A extent as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord implicit_gemm_tensor_a_extent( Operator conv_operator, Conv2dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.activation_extent(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.output_extent(); case cutlass::conv::Operator::kWgrad: return problem_size.output_extent(); default : break; } return cutlass::Tensor4DCoord(); } /// Returns ImplicitGemm tensor B extent as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord implicit_gemm_tensor_b_extent( Operator conv_operator, Conv2dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.filter_extent(); case cutlass::conv::Operator::kDeconv: return problem_size.filter_extent(true); case cutlass::conv::Operator::kDgrad: return problem_size.filter_extent(); case cutlass::conv::Operator::kWgrad: return problem_size.activation_extent(); default : break; } return cutlass::Tensor4DCoord(); } /// Returns ImplicitGemm tensor C extent as Tensor4DCoord CUTLASS_HOST_DEVICE cutlass::Tensor4DCoord implicit_gemm_tensor_c_extent( Operator conv_operator, Conv2dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.output_extent(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.activation_extent(); case cutlass::conv::Operator::kWgrad: return problem_size.filter_extent(); default : break; } return cutlass::Tensor4DCoord(); } /// Returns ImplicitGemm tensor A size in number of elements CUTLASS_HOST_DEVICE int64_t implicit_gemm_tensor_a_size( Operator conv_operator, Conv2dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.activation_size(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.output_size(); case cutlass::conv::Operator::kWgrad: return problem_size.output_size(); default : break; } return 0; } /// Returns ImplicitGemm tensor B size in number of elements CUTLASS_HOST_DEVICE int64_t implicit_gemm_tensor_b_size( Operator conv_operator, Conv2dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.filter_size(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.filter_size(); case cutlass::conv::Operator::kWgrad: return problem_size.activation_size(); default : break; } return 0; } /// Returns ImplicitGemm tensor C size in number of elements CUTLASS_HOST_DEVICE int64_t implicit_gemm_tensor_c_size( Operator conv_operator, Conv2dProblemSize const &problem_size) { switch (conv_operator) { case cutlass::conv::Operator::kFprop: return problem_size.output_size(); case cutlass::conv::Operator::kDeconv: case cutlass::conv::Operator::kDgrad: return problem_size.activation_size(); case cutlass::conv::Operator::kWgrad: return problem_size.filter_size(); default : break; } return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// 
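/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative host-side sketch of the mapping helpers above (not used by the library itself).
// The tensor shapes and the function name are hypothetical example values.

inline void example_implicit_gemm_mapping() {

  cutlass::Tensor4DCoord input_size(1, 56, 56, 64);    // NHWC
  cutlass::Tensor4DCoord filter_size(256, 3, 3, 64);   // KRSC
  cutlass::Tensor4DCoord padding(1, 1, 1, 1);          // pad_h, upper_pad_h, pad_w, upper_pad_w
  cutlass::MatrixCoord stride(1, 1);
  cutlass::MatrixCoord dilation(1, 1);

  // This constructor computes the output extent (P, Q) from the inputs.
  cutlass::conv::Conv2dProblemSize problem(
      input_size, filter_size, padding, stride, dilation,
      cutlass::conv::Mode::kCrossCorrelation);

  // GEMM view of Fprop: M = N*P*Q, N = K, K = R*S*C (per group).
  cutlass::gemm::GemmCoord gemm_size =
      cutlass::conv::implicit_gemm_problem_size(cutlass::conv::Operator::kFprop, problem);

  // Extents of the implicit-GEMM A and C tensors (activation and output for Fprop).
  cutlass::Tensor4DCoord extent_A =
      cutlass::conv::implicit_gemm_tensor_a_extent(cutlass::conv::Operator::kFprop, problem);
  cutlass::Tensor4DCoord extent_C =
      cutlass::conv::implicit_gemm_tensor_c_extent(cutlass::conv::Operator::kFprop, problem);

  (void)gemm_size; (void)extent_A; (void)extent_C;
}
/////////////////////////////////////////////////////////////////////////////////////////////////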
//////////////////////////////////////////////////////////////////////////////////////////////////// // Strided dgrad helper functions // //////////////////////////////////////////////////////////////////////////////////////////////////// // Returns number of CTA tiles in M to cover valid MMAs per starting filter position CUTLASS_HOST_DEVICE int strided_dgrad_tile_m_per_filter( Conv2dProblemSize const &problem_size, int tile_size_m) { // Compute NHW rows in Dx output that need MMA per starting filter position int rows_h_per_filter = (problem_size.H + problem_size.stride_h - 1) / problem_size.stride_h; int rows_w_per_filter = (problem_size.W + problem_size.stride_w - 1) / problem_size.stride_w; int rows_nhw_per_filter = problem_size.N * rows_h_per_filter * rows_w_per_filter; // Number of CTA tiles in M to cover valid MMAs per starting filter position int tile_m_per_filter = (rows_nhw_per_filter + tile_size_m - 1) / tile_size_m; return tile_m_per_filter; } // Computes starting Dx coord (h, w) for given starting filter position CUTLASS_HOST_DEVICE void strided_dgrad_starting_coords( Conv2dProblemSize const &problem_size, FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod, int r, int s, int &start_h, int &start_w) { // function locals for remainder by fast divmod int pad_h_rem_, pad_w_rem_; // start_h = std::abs(problem_size.stride_h - ((problem_size.pad_h % problem_size.stride_h) - r)) % problem_size.stride_h; stride_h_divmod.divmod(pad_h_rem_, problem_size.pad_h); int r_ = absolute_value(problem_size.stride_h - (pad_h_rem_ - r)); stride_h_divmod.divmod(start_h, r_); //start_w = std::abs(problem_size.stride_w - ((problem_size.pad_w % problem_size.stride_w) - s)) % problem_size.stride_w; stride_w_divmod.divmod(pad_w_rem_, problem_size.pad_w); int s_ = absolute_value(problem_size.stride_w - (pad_w_rem_ - s)); stride_w_divmod.divmod(start_w, s_); } } // namespace conv } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////////////////////////
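/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch of how the strided-dgrad helpers above might be driven (not part of the
// original header). The 128-row threadblock tile and the function name are hypothetical; real
// kernels receive these values through their Params objects.

inline void example_strided_dgrad_helpers(cutlass::conv::Conv2dProblemSize const &problem) {

  cutlass::FastDivmod stride_h_divmod(problem.stride_h);
  cutlass::FastDivmod stride_w_divmod(problem.stride_w);

  // CTA tiles along M needed per starting filter position, assuming a 128-row threadblock tile.
  int tile_m_per_filter = cutlass::conv::strided_dgrad_tile_m_per_filter(problem, 128);

  // Starting (h, w) coordinate in Dx for filter position (r, s) = (0, 0).
  int start_h = 0, start_w = 0;
  cutlass::conv::strided_dgrad_starting_coords(
      problem, stride_h_divmod, stride_w_divmod, /*r=*/0, /*s=*/0, start_h, start_w);

  (void)tile_m_per_filter; (void)start_h; (void)start_w;
}
/////////////////////////////////////////////////////////////////////////////////////////////////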
include/cutlass/conv/conv2d_problem_size.h/0
{ "file_path": "include/cutlass/conv/conv2d_problem_size.h", "repo_id": "include", "token_count": 8897 }
15
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined fused activation's scale+bias+relu and Implicit GEMM kernel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/aligned_buffer.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/semaphore.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/tensor.h" #include "cutlass/gemm/gemm.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/epilogue/threadblock/output_iterator_parameter.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad) typename ConvProblemSize_ = Conv2dProblemSize ///! 
Convolutional operator on 2D or 3D problem > struct ImplicitGemmConvolutionFusion { using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; static Operator const kConvolutionalOperator = ConvOperator; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementScaleBias = typename Mma::IteratorScaleBias::Element; using LayoutScaleBias = typename Mma::IteratorScaleBias::Layout; using ElementC = typename EpilogueOutputOp::ElementOutput; using LayoutC = LayoutA; using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator; using ElementCompute = typename EpilogueOutputOp::ElementCompute; using WarpMmaOperator = typename Mma::Policy::Operator; using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator; using MathOperator = typename ArchMmaOperator::Operator; using OperatorClass = typename WarpMmaOperator::OperatorClass; using ArchTag = typename WarpMmaOperator::ArchTag; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename WarpMmaOperator::Shape; using InstructionShape = typename ArchMmaOperator::Shape; static int const kStages = Mma::kStages; static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; using TensorRefA = typename Mma::IteratorA::TensorRef; using TensorRefB = typename Mma::IteratorB::TensorRef; using TensorRefScaleBias = typename Mma::IteratorScaleBias::TensorRef; using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>; /// Check that iterator A and B convolution dimensions are the same and // set device::ImplicitGemmConvolution::kConvDim static_assert(Mma::IteratorA::kConvDim == Mma::IteratorB::kConvDim, "Convolution on different dimensions is not supported"); static int const kConvDim = Mma::IteratorA::kConvDim; /// Conv dimension and problem size structure (Conv2d or Conv3d) using ConvProblemSize = ConvProblemSize_; static conv::GroupMode const kGroupMode = conv::GroupMode::kNone; /// Wgrad C stride idx for implicit gemm algorithm // Conv2d row-major matrix C (KxRSC) // Conv3d row-major matrix C (KxTRSC) static int const kWgradCStrideIdx = platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value ? 2 : 3; /// This chooses the appropriate stride element of the C tensor. static int const kTensorCStrideIdx = (kConvolutionalOperator == conv::Operator::kWgrad ?
kWgradCStrideIdx : 0); // // // using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter< LayoutC, typename Epilogue::OutputTileIterator::Layout, TensorRefC, ConvOperator, ConvProblemSize >; /// Argument structure struct Arguments { // // Data members // ConvProblemSize problem_size; TensorRefA ref_A; TensorRefB ref_B; TensorRefScaleBias ref_scale; TensorRefScaleBias ref_bias; TensorRefC ref_C; TensorRefC ref_D; typename EpilogueOutputOp::Params output_op; SplitKMode split_k_mode; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE Arguments() { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size ): problem_size(problem_size) { } CUTLASS_HOST_DEVICE Arguments( ConvProblemSize const & problem_size, TensorRefA const & ref_A, TensorRefB const & ref_B, TensorRefScaleBias const & ref_scale, TensorRefScaleBias const & ref_bias, TensorRefC const & ref_C, TensorRefC const & ref_D, typename EpilogueOutputOp::Params const & output_op, SplitKMode const & split_k_mode = SplitKMode::kSerial ): problem_size(problem_size), ref_A(ref_A), ref_B(ref_B), ref_scale(ref_scale), ref_bias(ref_bias), ref_C(ref_C), ref_D(ref_D), output_op(output_op), split_k_mode(split_k_mode) { } }; /// Parameters structure struct Params { ConvProblemSize problem_size{}; cutlass::gemm::GemmCoord grid_tiled_shape{}; gemm::GemmCoord implicit_gemm_problem_size{}; int swizzle_log_tile{0}; int gemm_k_iterations{0}; typename Mma::IteratorA::Params iterator_A{}; typename Mma::IteratorA::Element const *ptr_A = nullptr; typename Mma::IteratorB::Params iterator_B{}; typename Mma::IteratorB::Element const *ptr_B = nullptr; typename Mma::IteratorScaleBias::Params iterator_scale_bias{}; typename Mma::IteratorScaleBias::Element const *ptr_scale = nullptr; typename Mma::IteratorScaleBias::Element const *ptr_bias = nullptr; typename Epilogue::OutputTileIterator::Params iterator_C {}; typename Epilogue::OutputTileIterator::Element *ptr_C = nullptr; typename Epilogue::OutputTileIterator::Params iterator_D {}; typename Epilogue::OutputTileIterator::Element *ptr_D = nullptr; typename EpilogueOutputOp::Params output_op {}; int *semaphore = nullptr; SplitKMode split_k_mode {}; // // Methods // Params() = default; /// CUTLASS_HOST_DEVICE Params( Arguments const &args, int *semaphore = nullptr ): problem_size(args.problem_size), implicit_gemm_problem_size(cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size)), iterator_A(Mma::IteratorA::getParams(args.problem_size, args.ref_A.layout())), ptr_A(args.ref_A.data()), iterator_B(args.problem_size, args.ref_B.layout()), ptr_B(args.ref_B.data()), iterator_scale_bias(args.problem_size, args.ref_scale.layout()), ptr_scale(args.ref_scale.data()), ptr_bias(args.ref_bias.data()), iterator_C(ConvOutputIteratorParameter::layout(args.ref_C)), ptr_C(args.ref_C.data()), iterator_D(ConvOutputIteratorParameter::layout(args.ref_D)), ptr_D(args.ref_D.data()), output_op(args.output_op), semaphore(semaphore), split_k_mode(args.split_k_mode) { gemm_k_iterations = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape::kK, args.problem_size); ThreadblockSwizzle threadblock_swizzle; grid_tiled_shape = threadblock_swizzle.get_tiled_shape( implicit_gemm_problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices); swizzle_log_tile = threadblock_swizzle.get_log_tile(grid_tiled_shape); } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; 
typename Epilogue::SharedStorage epilogue; }; // // Methods // CUTLASS_HOST_DEVICE ImplicitGemmConvolutionFusion() { } /// Executes one ImplicitGEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() || params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) { return; } // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A operand typename Mma::IteratorA iterator_A( params.iterator_A, params.problem_size, params.ptr_A, thread_idx, MatrixCoord( threadblock_tile_idx.m() * Mma::Shape::kM, threadblock_tile_idx.k() * Mma::Shape::kK ) ); // Construct iterators to B operand typename Mma::IteratorB iterator_B( params.iterator_B, params.problem_size, params.ptr_B, thread_idx, MatrixCoord( threadblock_tile_idx.k() * Mma::Shape::kK, threadblock_tile_idx.n() * Mma::Shape::kN ) ); // Construct iterators to A scale/bias vector typename Mma::IteratorScaleBias iterator_scale_bias( params.iterator_scale_bias, params.problem_size, params.ptr_scale, params.ptr_bias, thread_idx, MatrixCoord( 0, (kConvolutionalOperator == conv::Operator::kFprop) ? (threadblock_tile_idx.k() * Mma::Shape::kK) : // Wgrad (threadblock_tile_idx.n() * Mma::Shape::kN) ) ); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add mma(params.gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_scale_bias, accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // Construct the semaphore. int block_idx = threadblock_tile_idx.m() + threadblock_tile_idx.n() * params.grid_tiled_shape.m(); Semaphore semaphore(params.semaphore + block_idx, thread_idx); // Compute logical position within grid threadblock_tile_idx = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // If performing a reduction via split-K, fetch the initial synchronization if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. 
semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_idx.k(), params.grid_tiled_shape.k()); } MatrixCoord threadblock_offset( threadblock_tile_idx.m() * Mma::Shape::kM, threadblock_tile_idx.n() * Mma::Shape::kN ); // Tile iterator writing to destination tensor typename Epilogue::OutputTileIterator iterator_D( params.iterator_D, params.ptr_D, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, threadblock_offset ); // Tile iterator reading from source accumulator tensor typename Epilogue::OutputTileIterator iterator_C( params.iterator_C, params.ptr_C, ConvOutputIteratorParameter::extent(params.problem_size), thread_idx, threadblock_offset ); // Construct the epilogue Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_idx.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_idx.k()); } // Each split-k-slice writes to a unique tensor location else if (params.split_k_mode == SplitKMode::kParallel) { iterator_D.add_pointer_offset(threadblock_tile_idx.k() * cutlass::conv::implicit_gemm_tensor_c_size(ConvOperator, params.problem_size)); } // Run efficient epilogue epilogue(output_op, iterator_D, accumulators, iterator_C); // // Release the semaphore // if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_idx.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_idx.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
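/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch of the host-side launch pattern a device-level adaptor typically wraps
// around a kernel with this Params/Arguments shape. "FusionKernel" stands for a fully specialized
// ImplicitGemmConvolutionFusion<...>; the helper name is hypothetical, and details such as opting
// in to large dynamic shared memory are omitted.

#include "cutlass/device_kernel.h"

template <typename FusionKernel>
cudaError_t launch_fusion_kernel(typename FusionKernel::Arguments const &args,
                                 cudaStream_t stream = nullptr) {

  // Precompute kernel parameters (grid shape, iterator params, pointers) on the host.
  typename FusionKernel::Params params(args);

  typename FusionKernel::ThreadblockSwizzle swizzle;
  dim3 grid = swizzle.get_grid_shape(params.grid_tiled_shape);
  dim3 block(FusionKernel::kThreadCount, 1, 1);
  int smem_size = int(sizeof(typename FusionKernel::SharedStorage));

  // Launch the generic CUTLASS kernel entry point with the prepared parameters.
  cutlass::Kernel<FusionKernel><<<grid, block, smem_size, stream>>>(params);

  return cudaGetLastError();
}
/////////////////////////////////////////////////////////////////////////////////////////////////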
include/cutlass/conv/kernel/implicit_gemm_convolution_fusion.h/0
{ "file_path": "include/cutlass/conv/kernel/implicit_gemm_convolution_fusion.h", "repo_id": "include", "token_count": 5849 }
16
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Extracts the host-params objects into non-template code. */ #pragma once #define TRACE_CONV_PARAMS_INITIALIZERS_ENABLED 0 #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/threadblock/conv2d_params.h" #include "cutlass/conv/conv3d_problem_size.h" #if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED #include <fstream> #endif ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Params structure used for all Conv3d analytic tile iterators template< typename Layout_ = layout::TensorNDHWC > struct Conv3dAnalyticParams { using Layout = Layout_; Layout layout; // // Methods // CUTLASS_HOST_DEVICE Conv3dAnalyticParams() { } CUTLASS_HOST_DEVICE Conv3dAnalyticParams( Conv3dProblemSize const &, // unused; placeholder to match other Params interfaces. 
Layout const &layout ): layout(layout) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters structure used for Conv3dFpropActivationTileIteratorOptimized template< typename Layout_ = layout::TensorNDHWC > struct Conv3dFpropActivationIteratorOptimizedParams; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters structure used for Conv3dFpropActivationTileIteratorOptimized template<> struct Conv3dFpropActivationIteratorOptimizedParams<layout::TensorNDHWC> { using Layout = layout::TensorNDHWC; Layout layout; int64_t inc_next[4]; // {next S, next R, next T, next C} int filter_c_delta; // number of logical elements to add to filter_c_ int ZPQ; // product of Z*P*Q int PQ; // product of P*Q FastDivmod zpq_divmod; FastDivmod pq_divmod; FastDivmod q_divmod; // // Methods // CUTLASS_HOST_DEVICE Conv3dFpropActivationIteratorOptimizedParams() { } CUTLASS_HOST_DEVICE Conv3dFpropActivationIteratorOptimizedParams( Conv3dProblemSize const &problem_size, Layout const &layout, ///< layout object int element_size_bits, ///< size of each element in bits MatrixCoord threadblock_shape, int thread_count, int access_size, layout::PitchLinearCoord threadmap_iterations, layout::PitchLinearCoord threadmap_delta ): layout(layout), PQ(problem_size.P * problem_size.Q), ZPQ(problem_size.Z * problem_size.P * problem_size.Q), zpq_divmod(ZPQ), pq_divmod(PQ), q_divmod(problem_size.Q) { TRACE_CONV_INITIALIZERS("conv3d_fprop", "activation", element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta); int conv_sign = (problem_size.mode == Mode::kConvolution ? -1 : 1); // next S inc_next[0] = conv_sign * ( int64_t(layout.stride()[0]) * problem_size.dilation_w ) * element_size_bits / 8; // next R inc_next[1] = conv_sign * ( int64_t(layout.stride()[1]) * problem_size.dilation_h - (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w ) * element_size_bits / 8; // next T inc_next[2] = conv_sign * ( int64_t(layout.stride()[2]) * problem_size.dilation_d - (problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h - (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w ) * element_size_bits / 8; // next C inc_next[3] = ( threadblock_shape.column() * problem_size.split_k_slices - conv_sign * int64_t(problem_size.T - 1) * layout.stride()[2] * problem_size.dilation_d - conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h - conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w ) * element_size_bits / 8; // logical offset added to internal channel counter - units are elements, not bytes filter_c_delta = threadblock_shape.column() * problem_size.split_k_slices; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template< typename Layout_ = layout::TensorNDHWC > struct Conv3dFpropFilterIteratorOptimizedParams; ///////////////////////////////////////////////////////////////////////////////////////////////// template<> struct Conv3dFpropFilterIteratorOptimizedParams<layout::TensorNDHWC> { using Layout = layout::TensorNDHWC; Layout layout; int TRS; int filter_c_delta; int64_t inc_next_k; // offset in units of bytes to next K position int64_t inc_next_trs; // offset in units of bytes to next TRS position int64_t inc_next_c; // offset in units of bytes to next C position // // Methods // CUTLASS_HOST_DEVICE 
Conv3dFpropFilterIteratorOptimizedParams() { } CUTLASS_HOST_DEVICE Conv3dFpropFilterIteratorOptimizedParams( Conv3dProblemSize const &problem_size, Layout const &layout, int element_size_bits, ///< size of each element in bits MatrixCoord threadblock_shape, int thread_count, int access_size, layout::PitchLinearCoord threadmap_iterations, layout::PitchLinearCoord threadmap_delta ): layout(layout) { TRACE_CONV_INITIALIZERS("conv3d_fprop", "filter", element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta); TRS = problem_size.T * problem_size.R * problem_size.S; inc_next_k = (int64_t(layout.stride()[3]) * threadmap_delta.strided() * element_size_bits) / 8; inc_next_trs = ( int64_t(layout.stride()[0]) - int64_t(layout.stride()[3]) * (threadmap_iterations.strided() - 1) * threadmap_delta.strided() ) * element_size_bits / 8; inc_next_c = ( threadblock_shape.row() * problem_size.split_k_slices - int64_t(TRS - 1) * layout.stride()[0] - int64_t(threadmap_iterations.strided() - 1) * threadmap_delta.strided() * layout.stride()[3] ) * element_size_bits / 8; filter_c_delta = threadblock_shape.row() * problem_size.split_k_slices; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters object for Conv3d DGRAD OutputGradient (dy) iterator struct Conv3dDgradOutputGradientIteratorOptimizedParams { using Layout = layout::TensorNDHWC; Layout layout; int64_t inc_next[4]; // {next S, next R, next T, next K} int filter_k_delta; // number of logical elements to add to filter_k_ FastDivmod dhw_divmod; FastDivmod hw_divmod; FastDivmod w_divmod; // // Methods // CUTLASS_HOST_DEVICE Conv3dDgradOutputGradientIteratorOptimizedParams() { } CUTLASS_HOST_DEVICE Conv3dDgradOutputGradientIteratorOptimizedParams( Conv3dProblemSize const &problem_size, Layout const &layout, ///< layout object int element_size_bits, ///< size of each element in bits MatrixCoord threadblock_shape, int thread_count, int access_size, layout::PitchLinearCoord threadmap_iterations, layout::PitchLinearCoord threadmap_delta ): layout(layout), dhw_divmod(problem_size.D * problem_size.H * problem_size.W), hw_divmod(problem_size.H * problem_size.W), w_divmod(problem_size.W) { TRACE_CONV_INITIALIZERS("conv3d_dgrad", "output_gradient", element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta); int conv_sign = (problem_size.mode == Mode::kConvolution ? 
1 : -1); // next S inc_next[0] = conv_sign * ( int64_t(layout.stride()[0]) * problem_size.dilation_w ) * element_size_bits / 8; // next R inc_next[1] = conv_sign * ( int64_t(layout.stride()[1]) * problem_size.dilation_h - (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w ) * element_size_bits / 8; // next T inc_next[2] = conv_sign * ( int64_t(layout.stride()[2]) * problem_size.dilation_d - (problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h - (problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w ) * element_size_bits / 8; // next K inc_next[3] = ( threadblock_shape.column() * problem_size.split_k_slices - conv_sign * int64_t(problem_size.T - 1) * layout.stride()[2] * problem_size.dilation_d - conv_sign * int64_t(problem_size.R - 1) * layout.stride()[1] * problem_size.dilation_h - conv_sign * int64_t(problem_size.S - 1) * layout.stride()[0] * problem_size.dilation_w ) * element_size_bits / 8; // logical offset added to internal channel counter - units are elements, not bytes filter_k_delta = threadblock_shape.column() * problem_size.split_k_slices; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Parameters object for Conv3d DGRAD Filter (w) iterator struct Conv3dDgradFilterIteratorOptimizedParams { using Layout = layout::TensorNDHWC; Layout layout; int TRS; int filter_k_delta; int64_t inc_next_strided; // offset in units of bytes to next K coordinate within tile int64_t inc_next_trs; // offset in units of bytes to next TRS position int64_t inc_next_k; // offset in units of bytes to next K position in subsequent tile // // Methods // CUTLASS_HOST_DEVICE Conv3dDgradFilterIteratorOptimizedParams() { } CUTLASS_HOST_DEVICE Conv3dDgradFilterIteratorOptimizedParams( Conv3dProblemSize const &problem_size, Layout const &layout, int element_size_bits, ///< size of each element in bits MatrixCoord threadblock_shape, int thread_count, int access_size, layout::PitchLinearCoord threadmap_iterations, layout::PitchLinearCoord threadmap_delta ): layout(layout), TRS(problem_size.T * problem_size.R * problem_size.S) { TRACE_CONV_INITIALIZERS("conv3d_dgrad", "filter", element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta); inc_next_strided = ((int64_t)layout.stride()[3] * threadmap_delta.strided() * element_size_bits) / 8; inc_next_trs = ( (int64_t)layout.stride()[0] - (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[3] ) * element_size_bits / 8; inc_next_k = ( threadblock_shape.row() * problem_size.split_k_slices * (int64_t)layout.stride()[3] - (problem_size.T * problem_size.R * problem_size.S - 1) * (int64_t)layout.stride()[0] - (threadmap_iterations.strided() - 1) * threadmap_delta.strided() * (int64_t)layout.stride()[3] ) * element_size_bits / 8; filter_k_delta = threadblock_shape.row() * problem_size.split_k_slices; } }; /// Parameters object for Conv3d WGRAD OutputGradient iterator struct Conv3dWgradOutputGradientIteratorOptimizedParams { using Layout = layout::TensorNDHWC; using LongIndex = typename Layout::LongIndex; Layout layout; int NZPQ; // precomputed product of N*Z*P*Q for clearing predicates int ZPQ; // product of Z*P*Q unsigned zpq_mul; // precomputed quantities for fast computation of div/% by ZPQ unsigned zpq_shr; // in device code. int PQ; // product of P*Q unsigned pq_mul; // precomputed quantities for fast computation of div/% by PQ unsigned pq_shr; // in device code.
unsigned q_mul; // precomputed quantities for fast computation of div/% by Q unsigned q_shr; // in device code. LongIndex offset_next_strided; // offset in units of bytes to next nzpq coordinate within tile LongIndex offset_next_contiguous; // offset in units of bytes to next k coordinate within tile LongIndex inc_next_nzpq; // offset in units of bytes to next nzpq position in subsequent tile // // Methods // CUTLASS_HOST_DEVICE Conv3dWgradOutputGradientIteratorOptimizedParams() { } CUTLASS_HOST_DEVICE Conv3dWgradOutputGradientIteratorOptimizedParams( Conv3dProblemSize const &problem_size, Layout const &layout, int element_size_bits, MatrixCoord threadblock_shape, int thread_count, int access_size, layout::PitchLinearCoord threadmap_iterations, layout::PitchLinearCoord threadmap_delta ): layout(layout) { TRACE_CONV_INITIALIZERS("conv3d_wgrad", "output_gradient", element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta); // Incremental offsets in unites of bytes (number of elements) * element_size_bits / 8 offset_next_strided = (threadmap_delta.strided() * (int64_t)layout.stride()[0]) * element_size_bits / 8; offset_next_contiguous = (threadmap_delta.contiguous()) * element_size_bits / 8; inc_next_nzpq = (threadblock_shape.column() * problem_size.split_k_slices * (int64_t)layout.stride()[0]) * element_size_bits / 8; // Precompute several quantities for fast modulo arithmetic. NZPQ = problem_size.N * problem_size.Z * problem_size.P * problem_size.Q; ZPQ = problem_size.Z * problem_size.P * problem_size.Q; find_divisor(zpq_mul, zpq_shr, ZPQ); PQ = problem_size.P * problem_size.Q; find_divisor(pq_mul, pq_shr, PQ); find_divisor(q_mul, q_shr, problem_size.Q); } }; /// Parameters object for Conv3d WGRAD Activation Tile Access Iterator struct Conv3dWgradActivationIteratorOptimizedParams { using Layout = layout::TensorNDHWC; Layout layout; int RSC; // product of R*S*C unsigned rsc_mul; // precomputed quantities for fast computation of div/% by RSC unsigned rsc_shr; // in device code. int SC; // product of S*C unsigned sc_mul; // precomputed quantities for fast computation of div/% by SC unsigned sc_shr; // in device code. unsigned c_mul; // precomputed quantities for fast computation of div/% by C unsigned c_shr; // in device code. int ZPQ; // product of Z*P*Q unsigned zpq_mul; // precomputed quantities for fast computation of div/% by ZPQ unsigned zpq_shr; // in device code. int PQ; // product of P*Q unsigned pq_mul; // precomputed quantities for fast computation of div/% by PQ unsigned pq_shr; // in device code. unsigned q_mul; // precomputed quantities for fast computation of div/% by Q unsigned q_shr; // in device code. // // Methods // CUTLASS_HOST_DEVICE Conv3dWgradActivationIteratorOptimizedParams() { } CUTLASS_HOST_DEVICE Conv3dWgradActivationIteratorOptimizedParams( Conv3dProblemSize const &problem_size, Layout const &layout, int element_size_bits, MatrixCoord threadblock_shape, int thread_count, int access_size, layout::PitchLinearCoord threadmap_iterations, layout::PitchLinearCoord threadmap_delta ): layout(layout) { TRACE_CONV_INITIALIZERS("conv3d_wgrad", "activation", element_size_bits, threadblock_shape, thread_count, access_size, threadmap_iterations, threadmap_delta); // Precompute several quantities for fast modulo arithmetic. 
RSC = problem_size.R * problem_size.S * problem_size.C; find_divisor(rsc_mul, rsc_shr, RSC); SC = problem_size.S * problem_size.C; find_divisor(sc_mul, sc_shr, SC); find_divisor(c_mul, c_shr, problem_size.C); ZPQ = problem_size.Z * problem_size.P * problem_size.Q; find_divisor(zpq_mul, zpq_shr, ZPQ); PQ = problem_size.P * problem_size.Q; find_divisor(pq_mul, pq_shr, PQ); find_divisor(q_mul, q_shr, problem_size.Q); } }; } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
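/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch of what the precomputed FastDivmod members above are for: splitting a
// linear GEMM-M offset into (n, z, p, q) without hardware integer division, as the optimized
// conv3d tile iterators do. The function name is hypothetical, and the three-operand
// (quotient, remainder, dividend) FastDivmod call operator is assumed here.

CUTLASS_HOST_DEVICE
void example_decompose_nzpq(
    int offset_nzpq,
    cutlass::conv::threadblock::Conv3dFpropActivationIteratorOptimizedParams<> const &params,
    int &n, int &z, int &p, int &q) {

  int residual;
  params.zpq_divmod(n, residual, offset_nzpq);  // n = offset / (Z*P*Q), residual = offset % (Z*P*Q)
  params.pq_divmod(z, residual, residual);      // z = residual / (P*Q),  residual = residual % (P*Q)
  params.q_divmod(p, q, residual);              // p = residual / Q,      q = residual % Q
}
/////////////////////////////////////////////////////////////////////////////////////////////////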
include/cutlass/conv/threadblock/conv3d_params.h/0
{ "file_path": "include/cutlass/conv/threadblock/conv3d_params.h", "repo_id": "include", "token_count": 6898 }
17
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a multistage threadblock-scoped fused activation's scale+bias+relu and Implicit GEMM Convolution kernel. The original implicit GEMM stores out-of-bound data as zeroes in shared memory because zeroes fed into the tensor cores produce zeroes out of the tensor cores, leaving the result unchanged. When fusing scale+bias+relu into the mainloop, this is no longer true because 0 x scale + bias = bias, which is not always 0. So, instead of storing zeroes, this fused kernel stores the out-of-bound data as a special NaN (0x7eff); when applying scale+bias+relu, the code behaves like: if (data == 0x7eff) data = 0; else data = scale+bias+relu(data, scale, bias); The biggest difference compared with the fused Fprop scale+bias+relu kernel is that scale and bias are loop invariant in Wgrad, so they only need to be loaded once before the mainloop. See include/cutlass/conv/warp/scale_bias_relu_transform.h for the elementwise computation. See include/cutlass/arch/memory_sm80.h for the NaN fill. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/cache_operation.h" #include "cutlass/gemm/warp/scale_bias_tile_iterator.h" #include "cutlass/conv/warp/scale_bias_relu_transform.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions.
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Element type of scale and bias vectors typename ElementScaleBias_, /// Layout of scale and bias vectors typename LayoutScaleBias_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class MmaWgradFusionBase { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Element type of scale and bias vectors using ElementScaleBias = ElementScaleBias_; /// Layout of scale and bias vectors using LayoutScaleBias = LayoutScaleBias_; ///< Policy describing tuning details using Policy = Policy_; // // Dependent types // /// Warp-level Mma using Operator = typename Policy::Operator; /// Shape describing the overall GEMM computed from shared memory /// by each warp. using WarpGemm = typename Policy::Operator::Shape; /// Shape describing the number of warps filling the CTA using WarpCount = cutlass::gemm::GemmShape<Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>; /// Number of warp-level GEMM operations static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK); /// Number of stages static int const kStages = Stages; /// Tensor reference to the A operand using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>; /// Tensor reference to the B operand using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>; static_assert(kWarpGemmIterations > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); static_assert((kWarpGemmIterations % 2) == 0, "Inner loop iteration must be an even number."); // // Nested structs // /// Shared storage object needed by threadblock-scoped GEMM class SharedStorage { public: // // Type definitions // /// Shape of the A matrix operand in shared memory using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow, Shape::kK * kStages + Policy::SmemPaddingA::kColumn>; /// Shape of the B matrix operand in shared memory using ShapeB = MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow, Shape::kN + Policy::SmemPaddingB::kColumn>; public: // // Data members // /// Buffer for A operand AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A; /// Buffer for B operand AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B; public: // // Methods // /// Returns a layout object for the A matrix CUTLASS_DEVICE static typename Operator::LayoutA LayoutA() { return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn}); } /// Returns a layout object for the B matrix CUTLASS_HOST_DEVICE static typename Operator::LayoutB LayoutB() { return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn}); } /// Returns a TensorRef to the A operand CUTLASS_HOST_DEVICE TensorRefA operand_A_ref() { return TensorRefA{operand_A.data(), LayoutA()}; } /// Returns a TensorRef to the B operand CUTLASS_HOST_DEVICE TensorRefB operand_B_ref() { return TensorRefB{operand_B.data(), LayoutB()}; } }; protected: // // Data members // /// Iterator to load a warp-scoped tile of A operand from shared memory typename Operator::IteratorA warp_tile_iterator_A_; /// Iterator to load a warp-scoped tile of B operand from shared memory typename Operator::IteratorB warp_tile_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE
MmaWgradFusionBase( ///< Shared storage needed for internal use by threadblock-scoped GEMM SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx), warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {} }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Iterates over vectors of scale and bias vector in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorScaleBias_, /// Iterates over vectors of scale and bias vector i /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class ImplicitGemmWgradFusionMultistage : public MmaWgradFusionBase<Shape_, typename IteratorScaleBias_::Element, typename IteratorScaleBias_::Layout, Policy_, Stages> { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA_; ///< Iterates over tiles of B operand in global memory using IteratorB = IteratorB_; ///< Iterates over tiles of the scale and bias vectors in global memory using IteratorScaleBias = IteratorScaleBias_; ///< Policy describing tuning details using Policy = Policy_; ///< Base class using Base = MmaWgradFusionBase<Shape_, typename IteratorScaleBias::Element, typename IteratorScaleBias::Layout, Policy_, Stages>; using SmemIteratorA = SmemIteratorA_; using SmemIteratorB = SmemIteratorB_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; // // Dependent types // /// Fragment of accumulator tile using ElementC = typename Policy::Operator::ElementC; using FragmentC = typename Policy::Operator::FragmentC; /// Warp-level Mma using Operator = typename Policy::Operator; /// Internal structure exposed for introspection. 
struct Detail { /// Number of cp.async instructions to load one stage of operand A static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB = IteratorB::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load one group of operand A static int const kAccessesPerGroupA = (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// Number of cp.async instructions to load one group of operand B static int const kAccessesPerGroupB = (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; static int const kBBufferSize = ((sizeof(typename Operator::ElementC) == 4) && ((platform::is_same<typename Operator::Policy::Operator::ElementA, typename Operator::ElementA>::value && platform::is_same<typename Operator::Policy::Operator::ElementB, typename Operator::ElementB>::value)) && (Operator::Shape::kM >= 64 && Operator::Shape::kN >= 64)) ? 1 : 2; }; private: using WarpLoadedFragmentA = typename Operator::FragmentA; using WarpLoadedFragmentB = typename Operator::FragmentB; using WarpLoadedFragmentScaleBias = typename IteratorScaleBias::Fragment; using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB smem_iterator_B_; int warp_idx_m_; int warp_idx_n_; public: /// Construct from tensor references CUTLASS_DEVICE ImplicitGemmWgradFusionMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); warp_idx_m_ = warp_idx_mn % Base::WarpCount::kM; warp_idx_n_ = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m_, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n_}); } CUTLASS_DEVICE void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB &iterator_B, int group_start_A = 0, int group_start_B = 0) { iterator_A.set_iteration_index(group_start_A); this->smem_iterator_A_.set_iteration_index(group_start_A); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { typename IteratorA::AccessType *dst_ptr
= reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr, iterator_A.get(), iterator_A.valid()); ++iterator_A; ++this->smem_iterator_A_; } } iterator_B.set_iteration_index(group_start_B); this->smem_iterator_B_.set_iteration_index(group_start_B); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / 8; // Uses nan fill for out of bound data cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpB>( dst_ptr, iterator_B.get(), iterator_B.valid()); ++iterator_B; ++this->smem_iterator_B_; } } } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations, ///< destination accumulator tile FragmentC &accum, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB iterator_B, ///< iterator over scale and bias vectors in global memory IteratorScaleBias iterator_B_scale_bias, ///< initial value of accumulator FragmentC const &src_accum, ///< number of iterations per channel int gemm_k_iterations_per_channel = 0, ///< Imaginary strides used for planar-complex only - ignored here int64_t imag_stride_A = 0, int64_t imag_stride_B = 0) { // // Prologue // WarpLoadedFragmentScaleBias warp_loaded_frag_B_scale_bias; iterator_B_scale_bias.add_tile_offset({0, warp_idx_n_}); iterator_B_scale_bias.load(warp_loaded_frag_B_scale_bias); // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) { iterator_A.set_iteration_index(0); this->smem_iterator_A_.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr, iterator_A.get(), iterator_A.valid()); ++iterator_A; ++this->smem_iterator_A_; } iterator_B.set_iteration_index(0); this->smem_iterator_B_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / 8; // Uses Nan fill for out of bound data cutlass::arch::cp_async_nan<kSrcBytes, kCacheOpB>( dst_ptr, iterator_B.get(), iterator_B.valid()); ++iterator_B; ++this->smem_iterator_B_; } // Move to the next stage iterator_A.advance(); iterator_B.advance(); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Inserts a fence to group cp.async instructions into stages. 
cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand accum = src_accum; // Waits until kStages-2 stages have committed. cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA warp_loaded_frag_A[Detail::kBBufferSize]; WarpLoadedFragmentB warp_loaded_frag_B[2]; WarpTransformedFragmentA warp_transformed_frag_A[Detail::kBBufferSize]; WarpTransformedFragmentB warp_transformed_frag_B[2]; Operator warp_mma; cutlass::conv::warp::WgradScaleBiasReluTransform<WarpTransformedFragmentB, WarpLoadedFragmentScaleBias> elementwise_transform; this->warp_tile_iterator_A_.set_kgroup_index(0); this->warp_tile_iterator_B_.set_kgroup_index(0); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; // Start issuing the first group of the next stage outside of the mainloop copy_tiles_and_advance(iterator_A, iterator_B); int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], warp_loaded_frag_A[0], warp_loaded_frag_B[0]); elementwise_transform(warp_transformed_frag_B[0], warp_loaded_frag_B_scale_bias); // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. 
if (Detail::kBBufferSize == 2) { this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % Detail::kBBufferSize]); ++this->warp_tile_iterator_A_; } this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_B_; if (warp_mma_k > 0) { warp_mma.transform(warp_transformed_frag_A[warp_mma_k % Detail::kBBufferSize], warp_transformed_frag_B[warp_mma_k % 2], warp_loaded_frag_A[warp_mma_k % Detail::kBBufferSize], warp_loaded_frag_B[warp_mma_k % 2]); elementwise_transform(warp_transformed_frag_B[warp_mma_k % 2], warp_loaded_frag_B_scale_bias); } warp_mma( accum, warp_transformed_frag_A[warp_mma_k % Detail::kBBufferSize], warp_transformed_frag_B[warp_mma_k % 2], accum ); if (Detail::kBBufferSize == 1) { this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); ++this->warp_tile_iterator_A_; } if (warp_mma_k + 1 == Base::kWarpGemmIterations) { warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % Detail::kBBufferSize], warp_transformed_frag_B[(warp_mma_k + 1) % 2], warp_loaded_frag_A[(warp_mma_k + 1) % Detail::kBBufferSize], warp_loaded_frag_B[(warp_mma_k + 1) % 2]); elementwise_transform( warp_transformed_frag_B[(warp_mma_k + 1) % 2], warp_loaded_frag_B_scale_bias); } // Issue global->shared copies for the next stage int group_start_iteration_A, group_start_iteration_B; if (warp_mma_k + 1 == Base::kWarpGemmIterations) { group_start_iteration_A = 0; group_start_iteration_B = 0; } else { group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; } copy_tiles_and_advance(iterator_A, iterator_B, group_start_iteration_A, group_start_iteration_B); if (warp_mma_k + 2 == Base::kWarpGemmIterations) { // Inserts a fence to group cp.async instructions into stages. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages of cp.async have committed arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A.advance(); iterator_B.advance(); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_B_.add_tile_offset( {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations; } } } // Insert fence and wait for all outstanding cp.async operations to commit. cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
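// ------------------------------------------------------------------------------------------------
// Editor's illustrative sketch (not part of CUTLASS): the file-level comment above describes how
// out-of-bound operand-B elements are filled with the special NaN bit pattern 0x7eff by
// cp_async_nan<> and later mapped back to zero by the scale+bias+relu transform, so the fused
// mainloop does not add `bias` for elements that were never loaded. The standalone function below
// mimics that predication for a single element using only standard C++; the name
// scale_bias_relu_reference and its scalar float interface are hypothetical.
#include <algorithm>
#include <cstdint>

inline float scale_bias_relu_reference(uint16_t raw_fp16_bits, float data, float scale, float bias) {
  // Sentinel written in place of zero-fill for out-of-bound elements (see cp_async_nan).
  constexpr uint16_t kOutOfBoundNaN = 0x7eff;
  if (raw_fp16_bits == kOutOfBoundNaN) {
    return 0.0f;  // an out-of-bound element must not contribute `bias` to the accumulator
  }
  return std::max(data * scale + bias, 0.0f);  // scale + bias + ReLU for valid data
}
// ------------------------------------------------------------------------------------------------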
include/cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h/0
{ "file_path": "include/cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h", "repo_id": "include", "token_count": 10647 }
18
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Fusion callbacks specializations for the sm90 TMA warp-specialized (ws) epilogue */ #pragma once #include "cutlass/cutlass.h" #include "cute/tensor.hpp" #include "cutlass/epilogue/dispatch_policy.hpp" #include "cutlass/epilogue/fusion/callbacks.hpp" #include "cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp" #include "cutlass/epilogue/fusion/sm90_visitor_load_tma_warpspecialized.hpp" #include "cutlass/epilogue/fusion/sm90_visitor_store_tma_warpspecialized.hpp" #include "cutlass/epilogue/fusion/sm90_visitor_compute_tma_warpspecialized.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::epilogue::fusion { ///////////////////////////////////////////////////////////////////////////////////////////////// template <class NodeOp, class... 
ChildOps> using Sm90EVT = Sm90TreeVisitor<NodeOp, ChildOps...>; // D = alpha * acc template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class ElementOutput, class ElementCompute, class ElementScalar, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::ScaledAcc<ElementOutput, ElementCompute, ElementScalar, RoundStyle>, CtaTileShapeMNK, EpilogueTile > : Sm90EVT<Sm90Compute<multiplies, ElementOutput, ElementCompute, RoundStyle>, Sm90ScalarBroadcast<ElementScalar>, Sm90AccFetch > { using Impl = Sm90EVT<Sm90Compute<multiplies, ElementOutput, ElementCompute, RoundStyle>, Sm90ScalarBroadcast<ElementScalar>, Sm90AccFetch >; using Operation = fusion::ScaledAcc<ElementOutput, ElementCompute, ElementScalar, RoundStyle>; struct Arguments { // Give a name and flat ordering to the fusion callback args ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; // Conversion to the args expected by the visitor implementation // to_underlying_arguments will implicitly call this operator typename Impl::Arguments() const { return { // binary op : alpha * acc {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {} // binary args : multiplies }; // end binary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // D = alpha * acc + beta * C template< class ElementOutput, class ElementCompute, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinearCombination = Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc) Sm90ScalarBroadcast<ElementScalar>, // beta Sm90SrcFetch<ElementSource>, // C Sm90EVT<Sm90Compute<multiplies, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc Sm90ScalarBroadcast<ElementScalar>, // alpha Sm90AccFetch // acc > >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class ElementOutput, class ElementCompute, class ElementSource, class ElementScalar, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinearCombination<ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle>, CtaTileShapeMNK, EpilogueTile > : Sm90LinearCombination<typename cutlass::detail::get_unpacked_element_type<ElementOutput>::type, ElementCompute, ElementSource, ElementScalar, RoundStyle> { using Impl = Sm90LinearCombination<typename cutlass::detail::get_unpacked_element_type<ElementOutput>::type, ElementCompute, ElementSource, ElementScalar, RoundStyle>; using Operation = fusion::LinearCombination<ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle>; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; operator typename Impl::Arguments() const { return { // ternary op : beta * C + (alpha * acc) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // binary op : alpha * acc {{alpha}, {alpha_ptr}}, // 
leaf args : alpha {}, // leaf args : acc {} // binary args : multiplies }, // end binary op {} // ternary args : multiply_add }; // end ternary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // D = activation(alpha * acc + beta * C) template< template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinCombEltAct = Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>, // activation(beta * C + (alpha * acc)) Sm90LinearCombination<ElementCompute, ElementCompute, ElementSource, ElementScalar, RoundStyle> // beta * C + (alpha * acc) >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementSource, class ElementScalar, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinCombEltAct<ActivationFn, ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle>, CtaTileShapeMNK, EpilogueTile > : Sm90LinCombEltAct<ActivationFn, ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle> { using Impl = Sm90LinCombEltAct<ActivationFn, typename cutlass::detail::get_unpacked_element_type<ElementOutput>::type, ElementCompute, ElementSource, ElementScalar, RoundStyle>; using Operation = fusion::LinCombEltAct<ActivationFn, ElementOutput, ElementCompute, ElementSource, ElementScalar, RoundStyle>; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); operator typename Impl::Arguments() const { return { // unary op: activation(beta * C + (alpha * acc)) { // ternary op : beta * C + (alpha * acc) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // binary op : alpha * acc {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {} // binary args : multiplies }, // end binary op {} // ternary args : multiply_add }, // end ternary op activation // unary args: activation }; // end unary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // D = alpha * acc + beta * C + per-row bias template< class CtaTileShapeMNK, class ElementOutput, class ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinCombPerRowBias = Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc + bias) Sm90ScalarBroadcast<ElementScalar>, // beta Sm90SrcFetch<ElementSource>, // C Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc + bias Sm90ScalarBroadcast<ElementScalar>, // alpha Sm90AccFetch, // acc Sm90ColBroadcast<0, 
CtaTileShapeMNK, ElementBias, Stride<_1,_0,int>, AlignmentBias> // bias > >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class ElementOutput, class ElementCompute, class ElementBias, class ElementSource, class ElementScalar, int AlignmentBias, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinCombPerRowBias<ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>, CtaTileShapeMNK, EpilogueTile > : Sm90LinCombPerRowBias< CtaTileShapeMNK, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle> { using Impl = Sm90LinCombPerRowBias< CtaTileShapeMNK, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>; using Operation = fusion::LinCombPerRowBias< ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; using StrideBias = Stride<_1,_0,int>; ElementBias const* bias_ptr = nullptr; StrideBias dBias = {}; operator typename Impl::Arguments() const { return { // ternary op : beta * C + (alpha * acc + bias) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // ternary op : alpha * acc + bias {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias}, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }; // end ternary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // D = activation(alpha * acc + beta * C + per-row bias) template< class CtaTileShapeMNK, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinCombPerRowBiasEltAct = Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>, Sm90LinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle> >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementBias, class ElementSource, class ElementScalar, int AlignmentBias, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinCombPerRowBiasEltAct< ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle >, CtaTileShapeMNK, EpilogueTile > : Sm90LinCombPerRowBiasEltAct< CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle > { using Impl = Sm90LinCombPerRowBiasEltAct< CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, 
RoundStyle >; using Operation = fusion::LinCombPerRowBiasEltAct< ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle >; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; using StrideBias = Stride<_1,_0,int>; ElementBias const* bias_ptr = nullptr; StrideBias dBias = {}; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); operator typename Impl::Arguments() const { return { // unary op : activation(beta * C + (alpha * acc + bias)) { // ternary op : beta * C + (alpha * acc + bias) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // ternary op : alpha * acc + bias {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias}, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }, // end ternary op activation // unary args : activation }; // end unary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // D = activation(alpha * acc + beta * C + per-row bias) // Aux = alpha * acc + beta * C + per-row bias) template< class CtaTileShapeMNK, class EpilogueTile, int Stages, class StrideAux, class SmemLayoutAtom, class CopyOpR2S, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux = ElementOutput, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentAux = 128 / sizeof_bits_v<ElementAux>, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinCombPerRowBiasEltActAux = Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>, Sm90EVT<Sm90AuxStore<Stages, EpilogueTile, ElementAux, RoundStyle, StrideAux, SmemLayoutAtom, CopyOpR2S, AlignmentAux>, Sm90LinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle> > >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class GmemLayoutTagAux, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux, class ElementBias, class ElementSource, class ElementScalar, int AlignmentAux, int AlignmentBias, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile, class SmemLayoutAtom, class CopyOpR2S > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinCombPerRowBiasEltActAux< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >, CtaTileShapeMNK, EpilogueTile, SmemLayoutAtom, CopyOpR2S > : Sm90LinCombPerRowBiasEltActAux< CtaTileShapeMNK, EpilogueTile, StagesD, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpR2S, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle > { using Impl = Sm90LinCombPerRowBiasEltActAux< CtaTileShapeMNK, EpilogueTile, StagesD, 
cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpR2S, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >; using Operation = fusion::LinCombPerRowBiasEltActAux< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; using StrideBias = Stride<_1,_0,int>; ElementBias const* bias_ptr = nullptr; StrideBias dBias = {}; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); using StrideAux = cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>; ElementAux* aux_ptr = nullptr; StrideAux dAux = {}; operator typename Impl::Arguments() const { return { // unary op : activation(store(beta * C + (alpha * acc + bias))) { // unary op : store(beta * C + (alpha * acc + bias)) { // ternary op : beta * C + (alpha * acc + bias) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // ternary op : alpha * acc + bias {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias}, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }, // end ternary op {aux_ptr, dAux} // unary args : store }, // end unary op activation // unary args : activation }; // end unary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // D = per-row alpha * acc + per-row beta * C + per-row bias template< class CtaTileShapeMNK, class ElementOutput, class ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, int AlignmentScalar = 128 / sizeof_bits_v<ElementScalar>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90PerRowLinCombPerRowBias = Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc + bias) Sm90ColBroadcast<0, CtaTileShapeMNK, ElementScalar, Stride<_1,_0,int>, AlignmentScalar>, // beta Sm90SrcFetch<ElementSource>, // C Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc + bias Sm90ColBroadcast<0, CtaTileShapeMNK, ElementScalar, Stride<_1,_0,int>, AlignmentScalar>, // alpha Sm90AccFetch, // acc Sm90ColBroadcast<0, CtaTileShapeMNK, ElementBias, Stride<_1,_0,int>, AlignmentBias> // bias > >; // D = activation(per-row alpha * acc + per-row beta * C + per-row bias) template< class CtaTileShapeMNK, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, int AlignmentScalar = 128 / sizeof_bits_v<ElementScalar>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90PerRowLinCombPerRowBiasEltAct = Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>, Sm90PerRowLinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, 
ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle> >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementBias, class ElementSource, class ElementScalar, int AlignmentBias, int AlignmentScalar, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::PerRowLinCombPerRowBiasEltAct< ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle >, CtaTileShapeMNK, EpilogueTile > : Sm90PerRowLinCombPerRowBiasEltAct< CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle > { using Impl = Sm90PerRowLinCombPerRowBiasEltAct< CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle >; using Operation = fusion::PerRowLinCombPerRowBiasEltAct< ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, AlignmentScalar, RoundStyle >; struct Arguments { using StrideAlpha = Stride<_1,_0,int>; using StrideBeta = Stride<_1,_0,int>; ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; StrideAlpha dAlpha = {}; StrideBeta dBeta = {}; using StrideBias = Stride<_1,_0,int>; ElementBias const* bias_ptr = nullptr; StrideBias dBias = {}; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); operator typename Impl::Arguments() const { return { // unary op : activation(beta * C + (alpha * acc + bias)) { // ternary op : beta * C + (alpha * acc + bias) {beta_ptr, beta, dBeta}, // leaf args : beta {}, // leaf args : C { // ternary op : alpha * acc + bias {alpha_ptr, alpha, dAlpha}, // leaf args : alpha {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias}, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }, // end ternary op activation // unary args : activation }; // end unary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template <typename T> constexpr bool is_fp8_v = cute::is_same_v<T,float_e4m3_t> || cute::is_same_v<T,float_e5m2_t>; // We only apply the scaling factor if output is fp8 template <typename ElementOutput> struct ScaleOutOp { template <typename T> using Op = cutlass::first<T>; }; template <> struct ScaleOutOp<float_e4m3_t> { template <typename T> using Op = cutlass::multiplies<T>; }; template <> struct ScaleOutOp<float_e5m2_t> { template <typename T> using Op = cutlass::multiplies<T>; }; template <typename T> using amax = cutlass::maximum_absolute_value_reduction<T, true>; // propagate nans }; // end namespace detail // D = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias template< class CtaTileShapeMNK, class ElementOutput, class ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentBias = 128 /
sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90ScaledLinCombPerRowBias = Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementOutput, ElementCompute, RoundStyle>, // beta * C + (alpha * acc + bias) Sm90ScalarBroadcast<ElementScalar, Stride<_0,_0,_0>, 2>, // scale_c * beta Sm90SrcFetch<ElementSource>, // C Sm90EVT<Sm90Compute<homogeneous_multiply_add, ElementCompute, ElementCompute, RoundStyle>, // alpha * acc + bias Sm90ScalarBroadcast<ElementScalar, Stride<_0,_0,_0>, 3>, // scale_a * scale_b * alpha Sm90AccFetch, // acc Sm90ColBroadcast<0, CtaTileShapeMNK, ElementBias, Stride<_1,_0,int>, AlignmentBias> // bias > >; // Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias // if D is fp8 // D = scale_d * activation(Z) // else // D = activation(Z) template< class CtaTileShapeMNK, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90ScaledLinCombPerRowBiasEltAct = Sm90EVT<Sm90Compute<detail::ScaleOutOp<ElementOutput>::template Op, ElementOutput, ElementCompute, RoundStyle>, // activation(Z) * scale_d Sm90EVT<Sm90Compute<ActivationFn, ElementCompute, ElementCompute, RoundStyle>, // activation(Z) // Z = scale_a * scale_b * alpha * acc + beta * scale_c * C + per-row bias Sm90ScaledLinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle> >, Sm90ScalarBroadcast<ElementScalar> // scale_d >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementBias, class ElementSource, class ElementScalar, int AlignmentBias, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::ScaledLinCombPerRowBiasEltAct< ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle >, CtaTileShapeMNK, EpilogueTile > : Sm90ScaledLinCombPerRowBiasEltAct< CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle > { using Impl = Sm90ScaledLinCombPerRowBiasEltAct< CtaTileShapeMNK, ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle >; using Operation = fusion::ScaledLinCombPerRowBiasEltAct< ActivationFn, ElementOutput, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle >; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; ElementScalar scale_a = ElementScalar(1); ElementScalar scale_b = ElementScalar(1); ElementScalar scale_c = ElementScalar(1); ElementScalar scale_d = ElementScalar(1); ElementScalar const* scale_a_ptr = nullptr; ElementScalar const* scale_b_ptr = nullptr; ElementScalar const* scale_c_ptr = nullptr; ElementScalar const* scale_d_ptr = nullptr; using StrideBias = Stride<_1,_0,int>; ElementBias const* bias_ptr = nullptr; StrideBias dBias = {}; using ActivationArguments = typename 
Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); operator typename Impl::Arguments() const { return { // binary op : activation((scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias)) * scale_d { // unary op : activation((scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias)) { // ternary op : (scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias) {{scale_c, beta}, {scale_c_ptr, beta_ptr} }, // leaf args : (scale_c * beta) {}, // leaf args : C { // ternary op : (scale_a * scale_b * alpha) * acc + bias {{scale_a, scale_b, alpha}, {scale_a_ptr, scale_b_ptr, alpha_ptr} }, // leaf args : (scale_a * scale_b * alpha) {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias}, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }, // end ternary op activation // unary args : activation }, // end unary op {{scale_d}, {scale_d_ptr} }, // leaf args : scale_d {} // binary args : multiplies or first }; // end binary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias // if D is fp8 // amax_d = max(abs(elements in activation(Z))) // D = scale_d * activation(Z) // else // D = activation(Z) // if Aux is fp8 // amax_aux = max(abs(elements in Z)) // Aux = scale_aux * Z // else // Aux = Z // fp8 aux specialization template< class CtaTileShapeMNK, class EpilogueTile, int StagesD, class StrideAux, class SmemLayoutAtom, class CopyOpR2S, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux = ElementOutput, class ElementAmax = ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentAux = 128 / sizeof_bits_v<ElementAux>, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90ScaledLinCombPerRowBiasEltActAmaxAuxFp8 = Sm90SplitTreeVisitor< // Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias Sm90ScaledLinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle>, // D = activation(Z) * scale_d, amax_d = max(abs(elements in D)) Sm90EVT<Sm90Compute<detail::ScaleOutOp<ElementOutput>::template Op, ElementOutput, ElementCompute, RoundStyle>, // activation(Z) * scale_d Sm90EVT<Sm90ScalarReduction<detail::amax, atomic_maximum, ElementAmax, ElementCompute, RoundStyle>, // amax_d Sm90EVT<Sm90Compute<ActivationFn, ElementCompute, ElementCompute, RoundStyle>, // activation(Z) Sm90SplitTreeFetch // Z > >, Sm90ScalarBroadcast<ElementScalar> // scale_d >, // Aux = Z * scale_aux, amax_aux = max(abs(elements in Aux)) Sm90EVT<Sm90AuxStore<StagesD, EpilogueTile, ElementAux, RoundStyle, StrideAux, SmemLayoutAtom, CopyOpR2S, AlignmentAux>, // store(Aux) Sm90EVT<Sm90Compute<cutlass::multiplies, ElementCompute, ElementCompute, RoundStyle>, // Z * scale_aux Sm90EVT<Sm90ScalarReduction<detail::amax, atomic_maximum, ElementAmax, ElementCompute, RoundStyle>, // amax_aux Sm90SplitTreeFetch // Z >, Sm90ScalarBroadcast<ElementScalar> // scale_aux > > >; // non-fp8 aux specialization // lets us use some EVT specializations such as relu + uint1b_t aux template< class CtaTileShapeMNK, class EpilogueTile, int 
StagesD, class StrideAux, class SmemLayoutAtom, class CopyOpR2S, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux = ElementOutput, class ElementAmax = ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentAux = 128 / sizeof_bits_v<ElementAux>, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90ScaledLinCombPerRowBiasEltActAmaxAuxNotFp8 = // D = activation(Z) * scale_d, amax_d = max(abs(elements in D)) Sm90EVT<Sm90Compute<detail::ScaleOutOp<ElementOutput>::template Op, ElementOutput, ElementCompute, RoundStyle>, // activation(Z) * scale_d Sm90EVT<Sm90ScalarReduction<detail::amax, atomic_maximum, ElementAmax, ElementCompute, RoundStyle>, // amax_d Sm90EVT<Sm90Compute<ActivationFn, ElementCompute, ElementCompute, RoundStyle>, // activation(Z) Sm90EVT<Sm90AuxStore<StagesD, EpilogueTile, ElementAux, RoundStyle, StrideAux, SmemLayoutAtom, CopyOpR2S, AlignmentAux>, // Aux = Z // Z = scale_a * scale_b * alpha * acc + scale_c * beta * C + per-row bias Sm90ScaledLinCombPerRowBias<CtaTileShapeMNK, ElementCompute, ElementCompute, ElementBias, ElementSource, ElementScalar, AlignmentBias, RoundStyle> > > >, Sm90ScalarBroadcast<ElementScalar> // scale_d >; // dispatcher template< class CtaTileShapeMNK, class EpilogueTile, int StagesD, class StrideAux, class SmemLayoutAtom, class CopyOpR2S, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux = ElementOutput, class ElementAmax = ElementCompute, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentAux = 128 / sizeof_bits_v<ElementAux>, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90ScaledLinCombPerRowBiasEltActAmaxAux = conditional_t<detail::is_fp8_v<ElementAux>, Sm90ScaledLinCombPerRowBiasEltActAmaxAuxFp8< CtaTileShapeMNK, EpilogueTile, StagesD, StrideAux, SmemLayoutAtom, CopyOpR2S, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >, Sm90ScaledLinCombPerRowBiasEltActAmaxAuxNotFp8< CtaTileShapeMNK, EpilogueTile, StagesD, StrideAux, SmemLayoutAtom, CopyOpR2S, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle > >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class GmemLayoutTagAux, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux, class ElementAmax, class ElementBias, class ElementSource, class ElementScalar, int AlignmentAux, int AlignmentBias, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile, class SmemLayoutAtom, class CopyOpR2S > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::ScaledLinCombPerRowBiasEltActAmaxAux< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >, CtaTileShapeMNK, EpilogueTile, SmemLayoutAtom, CopyOpR2S > : Sm90ScaledLinCombPerRowBiasEltActAmaxAux< CtaTileShapeMNK, EpilogueTile, StagesD,
cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpR2S, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle > { using Impl = Sm90ScaledLinCombPerRowBiasEltActAmaxAux< CtaTileShapeMNK, EpilogueTile, StagesD, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpR2S, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >; using Operation = fusion::ScaledLinCombPerRowBiasEltActAmaxAux< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementAmax, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; ElementScalar scale_a = ElementScalar(1); ElementScalar scale_b = ElementScalar(1); ElementScalar scale_c = ElementScalar(1); ElementScalar scale_d = ElementScalar(1); ElementScalar const* scale_a_ptr = nullptr; ElementScalar const* scale_b_ptr = nullptr; ElementScalar const* scale_c_ptr = nullptr; ElementScalar const* scale_d_ptr = nullptr; ElementScalar scale_aux = ElementScalar(1); ElementScalar const* scale_aux_ptr = nullptr; using StrideBias = Stride<_1,_0,int>; ElementBias const* bias_ptr = nullptr; StrideBias dBias = {}; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); ElementAmax* amax_D_ptr = nullptr; ElementAmax* amax_aux_ptr = nullptr; using StrideAux = cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>; ElementAux* aux_ptr = nullptr; StrideAux dAux = {}; operator typename Impl::Arguments() const { // Only compute amax_d if D is fp8 ElementAmax* amax_D_ptr_ = nullptr; if constexpr (detail::is_fp8_v<ElementOutput>) { amax_D_ptr_ = amax_D_ptr; } // Aux is fp8 -> DAG arguments if constexpr (detail::is_fp8_v<ElementAux>) { typename Impl::Arguments args; // always use structured binding to unpack DAG args since it may or may not be a tuple auto& [Z_args, aux_args, D_args] = args; Z_args = { // ternary op : (scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias) {{scale_c, beta}, {scale_c_ptr, beta_ptr} }, // leaf args : (scale_c * beta) {}, // leaf args : C { // ternary op : (scale_a * scale_b * alpha) * acc + bias {{scale_a, scale_b, alpha}, {scale_a_ptr, scale_b_ptr, alpha_ptr} }, // leaf args : (scale_a * scale_b * alpha) {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias}, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }; // end ternary op D_args = { // binary op : activation(Z) * scale_d or activation(Z) { // unary op : reduce(activation(Z)) { // unary op : activation(Z) {}, // leaf args : Z activation // unary args : activation }, // end unary op {amax_D_ptr_} // unary args : reduce }, // end unary op {{scale_d}, {scale_d_ptr} }, // leaf args : scale_d {} // binary args : multiplies or first }; // end binary op aux_args = { // unary op : store(Aux) { // binary op : Z * scale_d or Z { // unary op : reduce(Z) {}, // leaf args : Z {amax_aux_ptr} // unary args : reduce }, // end unary op {{scale_aux}, {scale_aux_ptr} }, // leaf args : scale_d {} // binary args : multiplies }, // end binary op {aux_ptr, dAux} // unary args : 
store }; // end unary op return args; } // Aux is not fp8 -> Tree arguments else { return { // binary op : activation(Z) * scale_d or activation(Z) { // unary op : reduce(activation(Z)) { // unary op : activation(Z) { // unary op : store(Z) { // ternary op : (scale_c * beta) * C + ((scale_a * scale_b * alpha) * acc + bias) {{scale_c, beta}, {scale_c_ptr, beta_ptr} }, // leaf args : (scale_c * beta) {}, // leaf args : C { // ternary op : (scale_a * scale_b * alpha) * acc + bias {{scale_a, scale_b, alpha}, {scale_a_ptr, scale_b_ptr, alpha_ptr} }, // leaf args : (scale_a * scale_b * alpha) {}, // leaf args : acc {bias_ptr, ElementBias(0), dBias }, // leaf args : bias {} // ternary args : multiply_add }, // end ternary op {} // ternary args : multiply_add }, // end ternary op {aux_ptr, dAux} // unary args : store }, // end unary op activation // unary args : activation }, // end unary op {amax_D_ptr_} // unary args : reduce }, // end unary op {{scale_d},{scale_d_ptr}}, // leaf args : scale_d {} // binary args : multiplies or first }; // end binary op } } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template< class CtaTileShapeMNK, class EpilogueTile, int Stages, class StrideAux, class SmemLayoutAtom, class CopyOpS2R, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentAux = 128 / sizeof_bits_v<ElementAux>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinCombDeEltAct = Sm90EVT<Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>, // activation(beta * C + (alpha * acc), aux) Sm90LinearCombination<ElementCompute, ElementCompute, ElementSource, ElementScalar, RoundStyle>, // beta * C + (alpha * acc) Sm90AuxLoad<Stages, EpilogueTile, ElementAux, StrideAux, SmemLayoutAtom, CopyOpS2R, AlignmentAux> // aux >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class GmemLayoutTagAux, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux, class ElementSource, class ElementScalar, int AlignmentAux, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile, class SmemLayoutAtom, class CopyOpS2R > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinCombDeEltAct< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle >, CtaTileShapeMNK, EpilogueTile, SmemLayoutAtom, CopyOpS2R > : Sm90LinCombDeEltAct< CtaTileShapeMNK, EpilogueTile, StagesC, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpS2R, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle > { using Impl = Sm90LinCombDeEltAct< CtaTileShapeMNK, EpilogueTile, StagesC, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpS2R, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle >; using Operation = fusion::LinCombDeEltAct< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle >; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* 
alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); using StrideAux = cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>; ElementAux const* aux_ptr = nullptr; StrideAux dAux = {}; operator typename Impl::Arguments() const { return { // binary op : activation(beta * C + (alpha * acc), aux) { // ternary op : beta * C + (alpha * acc) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // binary op : alpha * acc {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {} // binary args : multiplies }, // end binary op {} // ternary args : multiply_add }, // end ternary op {aux_ptr, ElementAux(0), dAux}, // leaf args : aux activation // binary args : activation }; // end binary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template< class CtaTileShapeMNK, class EpilogueTile, int Stages, class StrideAux, class SmemLayoutAtom, class CopyOpS2R, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux = ElementOutput, class ElementBias = ElementOutput, class ElementSource = ElementOutput, class ElementScalar = ElementCompute, int AlignmentAux = 128 / sizeof_bits_v<ElementAux>, int AlignmentBias = 128 / sizeof_bits_v<ElementBias>, FloatRoundStyle RoundStyle = FloatRoundStyle::round_to_nearest > using Sm90LinCombDeEltActDePerRowBias = Sm90EVT<Sm90Compute<cutlass::epilogue::thread::Identity, ElementOutput, ElementCompute, RoundStyle>, // Identity for final conversion Sm90EVT<Sm90ColReduction<plus, plus, plus, 0, CtaTileShapeMNK, ElementBias, ElementCompute, RoundStyle, Stride<_1,_0,int>, AlignmentBias>, Sm90LinCombDeEltAct<CtaTileShapeMNK, EpilogueTile, Stages, StrideAux, SmemLayoutAtom, CopyOpS2R, ActivationFn, ElementCompute, ElementCompute, ElementAux, ElementSource, ElementScalar, AlignmentAux, RoundStyle> > >; template < int StagesC, int StagesD, int FragmentSize, bool ReuseSmemC, bool DelayTmaStore, class GmemLayoutTagAux, template <class> class ActivationFn, class ElementOutput, class ElementCompute, class ElementAux, class ElementBias, class ElementSource, class ElementScalar, int AlignmentAux, int AlignmentBias, FloatRoundStyle RoundStyle, class CtaTileShapeMNK, class EpilogueTile, class SmemLayoutAtom, class CopyOpS2R > struct FusionCallbacks< epilogue::Sm90TmaWarpSpecialized<StagesC, StagesD, FragmentSize, ReuseSmemC, DelayTmaStore>, fusion::LinCombDeEltActDePerRowBias< GmemLayoutTagAux, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >, CtaTileShapeMNK, EpilogueTile, SmemLayoutAtom, CopyOpS2R > : Sm90LinCombDeEltActDePerRowBias< CtaTileShapeMNK, EpilogueTile, StagesC, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpS2R, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle > { using Impl = Sm90LinCombDeEltActDePerRowBias< CtaTileShapeMNK, EpilogueTile, StagesC, cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>, SmemLayoutAtom, CopyOpS2R, ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >; using Operation = fusion::LinCombDeEltActDePerRowBias< GmemLayoutTagAux, 
ActivationFn, ElementOutput, ElementCompute, ElementAux, ElementBias, ElementSource, ElementScalar, AlignmentAux, AlignmentBias, RoundStyle >; struct Arguments { ElementScalar alpha = ElementScalar(1); ElementScalar beta = ElementScalar(0); ElementScalar const* alpha_ptr = nullptr; ElementScalar const* beta_ptr = nullptr; using ActivationArguments = typename Sm90Compute<ActivationFn, ElementOutput, ElementCompute, RoundStyle>::Arguments; ActivationArguments activation = ActivationArguments(); using StrideAux = cutlass::gemm::TagToStrideC_t<GmemLayoutTagAux>; ElementAux const* aux_ptr = nullptr; StrideAux dAux = {}; using StrideBias = Stride<_1,_0,int>; ElementBias* dbias_ptr = nullptr; StrideBias dDbias = {}; operator typename Impl::Arguments() const { return { // unary op : identity/convert { // unary op : reduce(activation(beta * C + (alpha * acc), aux)) { // binary op : activation(beta * C + (alpha * acc), aux) { // ternary op : beta * C + (alpha * acc) {{beta}, {beta_ptr}}, // leaf args : beta {}, // leaf args : C { // binary op : alpha * acc {{alpha}, {alpha_ptr}}, // leaf args : alpha {}, // leaf args : acc {} // binary args : multiplies }, // end binary op {} // ternary args : multiply_add }, // end ternary op {aux_ptr, ElementAux(0), dAux}, // leaf args : aux activation // binary args : activation }, // end binary op {dbias_ptr, ElementCompute(0), dDbias} // unary args : reduce }, // end unary op {} // unary args : identity/convert }; // end unary op } }; // Ctor inheritance using Impl::Impl; }; ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template <class FusionOpOrCallbacks, class = cute::void_t<>> struct get_element_aux { using type = void; }; template <class FusionOpOrCallbacks> struct get_element_aux<FusionOpOrCallbacks, cute::void_t<typename FusionOpOrCallbacks::ElementAux>> { using type = typename FusionOpOrCallbacks::ElementAux; }; template <class NodeOp, class... ChildOps> struct get_element_aux<Sm90TreeVisitor<NodeOp, ChildOps...>, cute::void_t<>> { using type = typename get_element_aux<NodeOp>::type; }; template <class... Ts> struct get_element_aux<FusionCallbacks<Ts...>, cute::void_t<typename FusionCallbacks<Ts...>::Operation>> { private: using Operation = typename FusionCallbacks<Ts...>::Operation; public: using type = typename get_element_aux<Operation>::type; }; } // namespace cutlass:epilogue::fusion::detail template <class Callbacks> using get_element_aux_t = typename detail::get_element_aux<Callbacks>::type; } // namespace cutlass::epilogue::fusion ///////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp/0
{ "file_path": "include/cutlass/epilogue/fusion/sm90_callbacks_tma_warpspecialized.hpp", "repo_id": "include", "token_count": 21889 }
19
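The visitor trees in the callbacks file above ultimately encode a small amount of per-element arithmetic. As a sanity aid, here is a host-side reference of that math in plain C++. This is a sketch: EpilogueScalars, reference_epilogue, and the unconditional scaling/amax steps are illustrative assumptions, not CUTLASS API — in the actual callbacks the amax reductions and the scale_d / scale_aux multiplies are only enabled when the corresponding tensor is FP8.

#include <algorithm>
#include <cmath>
#include <vector>

struct EpilogueScalars {
  float alpha = 1.f, beta = 0.f;
  float scale_a = 1.f, scale_b = 1.f, scale_c = 1.f;
  float scale_d = 1.f, scale_aux = 1.f;
};

// Per element (m, n):
//   Z   = (scale_a * scale_b * alpha) * acc + (scale_c * beta) * C + bias[m]
//   Aux = Z * scale_aux   (amax_aux reduced over Z)
//   D   = activation(Z) * scale_d   (amax_D reduced over activation(Z))
template <class ActivationFn>
void reference_epilogue(int M, int N,
                        std::vector<float> const& acc,   // M*N accumulators
                        std::vector<float> const& C,     // M*N source
                        std::vector<float> const& bias,  // per-row bias, length M
                        EpilogueScalars const& s,
                        ActivationFn act,
                        std::vector<float>& D,
                        std::vector<float>& Aux,
                        float& amax_D, float& amax_aux) {
  amax_D = 0.f;
  amax_aux = 0.f;
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N; ++n) {
      int idx = m * N + n;
      float Z = (s.scale_a * s.scale_b * s.alpha) * acc[idx]
              + (s.scale_c * s.beta) * C[idx]
              + bias[m];
      amax_aux = std::max(amax_aux, std::fabs(Z));   // only when Aux is fp8 in CUTLASS
      Aux[idx] = Z * s.scale_aux;                    // only scaled when Aux is fp8
      float d = act(Z);
      amax_D = std::max(amax_D, std::fabs(d));       // only when D is fp8 in CUTLASS
      D[idx] = d * s.scale_d;                        // only scaled when D is fp8
    }
  }
}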
/*************************************************************************************************** * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination operations with a generic element-wise activation function. Scaling factors are applied to operands A, B, and C. The pre-activation auxiliary output is also returned. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/scale_type.h" #include "cutlass/epilogue/thread/linear_combination_generic.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. 
/// /// Aux = ((alpha * scale_a * scale_b) * accumulator) + ((beta * scale_c) * source) + bias /// D = activation(Aux) /// template < template<typename T> class ActivationFunctor, typename ElementOutput_, ///< Data type used to load and store tensors typename ElementAuxOutput_, ///< Data type used to store auxiliary output int Count, ///< Number of elements computed per operation ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data to store typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling FloatRoundStyle Round = FloatRoundStyle::round_to_nearest, bool IsHeavy = false > class LinearCombinationGenericWithScalingAndAbsMax { public: using ElementOutput = ElementOutput_; using ElementAuxOutput = ElementAuxOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; using ElementScalingFactor = ElementAccumulator_; /// Data type used for absolute maximum value using ElementAbsmax = float; static bool const kIsScalingAndAmaxAuxOutputNeeded = (platform::is_same<ElementAuxOutput, cutlass::float_e4m3_t>::value || platform::is_same<ElementAuxOutput, cutlass::float_e5m2_t>::value); static bool const kIsScalingAndAmaxOutputNeeded = (platform::is_same<ElementOutput, cutlass::float_e4m3_t>::value || platform::is_same<ElementOutput, cutlass::float_e5m2_t>::value); static bool const kIsHeavy = IsHeavy; static int const kCount = Count; static const ScaleType::Kind kScale = Scale; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAuxOutput = Array<ElementAuxOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using FragmentCompute = Array<ElementCompute, kCount>; static FloatRoundStyle const kRound = Round; /// Host-constructable parameters structure struct Params { struct ActivationParams : LinearCombinationGenericParams<ElementCompute>, GenericActivationTraits<ActivationFunctor<ElementCompute>>::Arguments { using LinearCombinationGenericParams<ElementCompute>::LinearCombinationGenericParams; }; ActivationParams activation; ElementScalingFactor const* scale_a_ptr = nullptr; ///< pointer to a scalar - if not null, loads it from memory ElementScalingFactor const* scale_b_ptr = nullptr; ///< pointer to b scalar - if not null, loads it from memory ElementScalingFactor const* scale_c_ptr = nullptr; ///< pointer to c scalar - if not null, loads it from memory ElementScalingFactor const* scale_d_ptr = nullptr; ///< pointer to d scalar - if not null, loads it from memory ElementScalingFactor const* scale_aux_ptr = nullptr; ///< pointer to aux scalar - if not null, loads it from memory ElementAbsmax * abs_max_aux_ptr = nullptr; ///< pointer to location to store amax of Aux ElementAbsmax * abs_max_D_ptr = nullptr; ///< pointer to location to store amax of D CUTLASS_HOST_DEVICE Params() : scale_a_ptr(nullptr), scale_b_ptr(nullptr), scale_c_ptr(nullptr), scale_d_ptr(nullptr), scale_aux_ptr(nullptr), abs_max_aux_ptr(nullptr), abs_max_D_ptr(nullptr) {} CUTLASS_HOST_DEVICE Params(ActivationParams activation_params, ElementScalingFactor const* scale_a_ptr, ElementScalingFactor const* scale_b_ptr, ElementScalingFactor const* scale_c_ptr, ElementScalingFactor const* scale_d_ptr, ElementScalingFactor const* scale_aux_ptr, ElementAbsmax * abs_max_aux_ptr, ElementAbsmax * abs_max_D_ptr) : 
activation(activation_params), scale_a_ptr(scale_a_ptr), scale_b_ptr(scale_b_ptr), scale_c_ptr(scale_c_ptr), scale_d_ptr(scale_d_ptr), scale_aux_ptr(scale_aux_ptr), abs_max_aux_ptr(abs_max_aux_ptr), abs_max_D_ptr(abs_max_D_ptr) {} }; private: // // Data members // Params params_; bool skip_elementwise_; // Scaling factors for output and auxiliary output ElementCompute scale_d_; ElementCompute scale_aux_; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE LinearCombinationGenericWithScalingAndAbsMax(Params const &params) : params_(params), skip_elementwise_(false), scale_d_(ElementCompute(params.scale_d_ptr ? *(params.scale_d_ptr) : ElementScalingFactor(1))), scale_aux_(ElementCompute(params.scale_aux_ptr ? *(params.scale_aux_ptr) : ElementScalingFactor(1))) { params_.activation.alpha = (params.activation.alpha_ptr ? *params.activation.alpha_ptr : params.activation.alpha); params_.activation.beta = (params.activation.beta_ptr ? *params.activation.beta_ptr : params.activation.beta); auto scale_a = ElementCompute(params.scale_a_ptr ? *(params.scale_a_ptr) : ElementScalingFactor(1)); auto scale_b = ElementCompute(params.scale_b_ptr ? *(params.scale_b_ptr) : ElementScalingFactor(1)); auto scale_c = ElementCompute(params.scale_c_ptr ? *(params.scale_c_ptr) : ElementScalingFactor(1)); multiplies<ElementCompute> multiply; params_.activation.alpha = multiply(params.activation.alpha, multiply(scale_a, scale_b)); params_.activation.beta = multiply(params.activation.beta, scale_c); } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { if (Scale == ScaleType::NoBetaScaling) return true; if (Scale == ScaleType::OnlyAlphaScaling) return false; if (Scale == ScaleType::Nothing) return false; return params_.activation.beta != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { params_.activation.beta = ElementCompute(1); } // Only the final partition should perform the activation function // and scale the output and auxiliary output values. 
if (k_partition != k_partition_count - 1) { skip_elementwise_ = true; scale_d_ = ElementCompute(1.); scale_aux_ = ElementCompute(1.); } } /// Computes linear scaling: /// Aux = (alpha * scale_a * scale_b * accumulator) + (beta * scale_c * source) + bias /// D = activation(Aux) CUTLASS_HOST_DEVICE void operator()( FragmentCompute& output, FragmentCompute& aux_output, FragmentAccumulator const &accumulator, FragmentCompute const& bias, FragmentOutput const &source) { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_source = source_converter(source); FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> multiply; plus<FragmentCompute> add; multiply_add<FragmentCompute> mul_add_accumulator; ActivationFunctor<FragmentCompute> activation; if (Scale == ScaleType::NoBetaScaling) { intermediate = converted_source; intermediate = mul_add_accumulator(params_.activation.alpha, converted_accumulator, intermediate); } else if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = multiply(params_.activation.beta, converted_source); intermediate = mul_add_accumulator(params_.activation.alpha, converted_accumulator, intermediate); } intermediate = add(intermediate, bias); aux_output = intermediate; if constexpr (GenericActivationTraits<ActivationFunctor<ElementCompute>>::IsArgumentsNeeded) { output = skip_elementwise_ ? intermediate : activation(intermediate, params_.activation); } else { output = skip_elementwise_ ? intermediate : activation(intermediate); } } /// Computes linear scaling: /// Aux = (alpha * scale_a * scale_b * accumulator) + bias /// D = activation(Aux) CUTLASS_DEVICE void operator()( FragmentCompute& output, FragmentCompute& aux_output, FragmentAccumulator const &accumulator, FragmentCompute const& bias) { // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; FragmentCompute converted_accumulator = accumulator_converter(accumulator); // Perform binary operations FragmentCompute intermediate; multiplies<FragmentCompute> multiply; plus<FragmentCompute> add; ActivationFunctor<FragmentCompute> activation; if (Scale == ScaleType::Nothing) { intermediate = converted_accumulator; } else { intermediate = multiply(params_.activation.alpha, converted_accumulator); } intermediate = add(intermediate, bias); aux_output = intermediate; if constexpr (GenericActivationTraits<ActivationFunctor<FragmentCompute>>::IsArgumentsNeeded) { output = skip_elementwise_ ? intermediate : activation(intermediate, params_.activation); } else { output = skip_elementwise_ ? intermediate : activation(intermediate); } } CUTLASS_HOST_DEVICE ElementAbsmax* get_ptr_output_abs_max() const { return params_.abs_max_D_ptr; } CUTLASS_HOST_DEVICE ElementAbsmax* get_ptr_aux_output_abs_max() const { return params_.abs_max_aux_ptr; } CUTLASS_HOST_DEVICE ElementCompute get_scale_d() const { return scale_d_; } CUTLASS_HOST_DEVICE ElementCompute get_scale_aux() const { return scale_aux_; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass
include/cutlass/epilogue/thread/linear_combination_generic_with_scaling.h/0
{ "file_path": "include/cutlass/epilogue/thread/linear_combination_generic_with_scaling.h", "repo_id": "include", "token_count": 4504 }
20
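The constructor and set_k_partition() logic of LinearCombinationGenericWithScalingAndAbsMax can be summarized in a few lines. The sketch below (plain C++, illustrative type ScaledLinCombState, not part of CUTLASS) mirrors how the tensor-wise scales are folded into alpha/beta once at construction, and how non-final split-K partitions defer the activation and output scaling to the last partition.

struct ScaledLinCombState {
  float alpha, beta;          // folded: alpha *= scale_a * scale_b, beta *= scale_c
  float scale_d, scale_aux;
  bool  skip_elementwise = false;

  ScaledLinCombState(float alpha_, float beta_,
                     float scale_a, float scale_b, float scale_c,
                     float scale_d_, float scale_aux_)
    : alpha(alpha_ * scale_a * scale_b), beta(beta_ * scale_c),
      scale_d(scale_d_), scale_aux(scale_aux_) {}

  // Mirrors set_k_partition(): partial partitions accumulate with beta = 1 and
  // only the final partition applies the activation and the output scales.
  void set_k_partition(int k_partition, int k_partition_count) {
    if (k_partition) {
      beta = 1.f;
    }
    if (k_partition != k_partition_count - 1) {
      skip_elementwise = true;
      scale_d = 1.f;
      scale_aux = 1.f;
    }
  }
};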
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #if !defined(__CUDACC_RTC__) #include <type_traits> #include <utility> #endif #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/cutlass.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// // // This is used for metaprogramming epilogue functors. If they define // `static bool const kIsHeavy = true;`, then the epilogue functor itself is // not inlined. This results in smaller code and is advantageous if the epilogue // functor consists of many instructions. // // If the epilogue functor does not define `kIsHeavy` or if it is `false`, then // the behavior from CUTLASS 2.5 and before is retained. The epilogue is fully // unrolled and inlined. 
// template<class> struct TypeSink { typedef void type; }; template<class T> using TypeSinkT = typename TypeSink<T>::type; template<class T, class=void> struct IsEpilogueFunctorHeavy { static bool const value = false; }; template<class T> struct IsEpilogueFunctorHeavy<T, TypeSinkT< decltype( T::kIsHeavy ) > > { static bool const value = T::kIsHeavy; }; //////////////////////////////////////////////////////////////////////////////// /// Base class for epilogues defining warp-level template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpShape_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) int FragmentsPerIteration = 1 > class EpilogueBase { public: using Shape = Shape_; using WarpShape = WarpShape_; static int const kPartitionsK = PartitionsK; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using Padding = Padding_; /// Output layout is always row-major using Layout = layout::RowMajor; /// The complete warp-level accumulator tile using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename AccumulatorTile::Element; /// Number of warps using WarpCount = gemm::GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, kPartitionsK >; /// Use this to control the granularity of one epilogue 'iteration' static int const kFragmentsPerIteration = FragmentsPerIteration; public: /// Shared storage allocation needed by the epilogue struct SharedStorage { // // Type definitions // /// Element type of shared memory using Element = typename WarpTileIterator::Element; /// Tensor reference to shared memory allocation using TensorRef = typename WarpTileIterator::TensorRef; /// Layout of shared memory allocation using Layout = typename WarpTileIterator::Layout; /// Logical shape of the shared memory tile written to by all warps. 
using Shape = MatrixShape< WarpCount::kM * WarpTileIterator::Shape::kRow * WarpCount::kK, WarpCount::kN * WarpTileIterator::Shape::kColumn >; /// Shape of the shared memory allocation for the epilogue using StorageShape = MatrixShape< (Shape::kRow + Padding::kRow) * kFragmentsPerIteration, Shape::kColumn + Padding::kColumn >; // // Data members // AlignedBuffer<Element, StorageShape::kCount> storage; // // Methods // /// Returns a pointer to the shared memory buffer CUTLASS_DEVICE Element *data() { return storage.data(); } /// Returns a tensor reference to the shared memory buffer CUTLASS_DEVICE TensorRef reference() { return TensorRef( storage.data(), Layout::packed({StorageShape::kRow, StorageShape::kColumn})); } }; protected: // // Data members // SharedStorage &shared_storage_; /// Stores a warp's fragment of accumulators to SMEM WarpTileIterator warp_tile_iterator_; public: /// Constructor CUTLASS_DEVICE EpilogueBase( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): shared_storage_(shared_storage), warp_tile_iterator_(shared_storage.reference(), lane_idx) { // Compute warp location within threadblock tile by mapping the warp_id to three coordinates: // // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_k = warp_idx / (WarpCount::kM * WarpCount::kN); int warp_mn = warp_idx % (WarpCount::kM * WarpCount::kN); int warp_m = warp_mn % WarpCount::kM; int warp_n = warp_mn / WarpCount::kM; MatrixCoord warp_offset{warp_k * WarpCount::kM + warp_m, warp_n}; warp_tile_iterator_.add_tile_offset(warp_offset); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
include/cutlass/epilogue/threadblock/epilogue_base.h/0
{ "file_path": "include/cutlass/epilogue/threadblock/epilogue_base.h", "repo_id": "include", "token_count": 2588 }
21
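EpilogueBase maps the flat warp index to (m, n, k) coordinates and folds the k slice into the row offset of the shared-memory tile. A small stand-alone check of that mapping (illustrative names, not CUTLASS code) is shown below.

#include <cassert>

struct WarpCount  { int kM, kN, kK; };
struct WarpOffset { int row, col; };

// Same decomposition as the EpilogueBase constructor: warp_k is the slowest index,
// warp_m the fastest, and the k slice is stacked on top of the M rows.
inline WarpOffset epilogue_warp_offset(int warp_idx, WarpCount wc) {
  int warp_k  = warp_idx / (wc.kM * wc.kN);
  int warp_mn = warp_idx % (wc.kM * wc.kN);
  int warp_m  = warp_mn % wc.kM;
  int warp_n  = warp_mn / wc.kM;
  return { warp_k * wc.kM + warp_m, warp_n };   // MatrixCoord{row, column}
}

int main() {
  WarpCount wc{2, 2, 2};  // e.g. 128x128 CTA, 64x64 warp tiles, 2 k-partitions
  assert(epilogue_warp_offset(0, wc).row == 0 && epilogue_warp_offset(0, wc).col == 0);
  assert(epilogue_warp_offset(5, wc).row == 3 && epilogue_warp_offset(5, wc).col == 0);
  return 0;
}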
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Visitor tree compute operations for the CUTLASS 2x epilogue */ #pragma once #include "cutlass/epilogue/threadblock/fusion/visitor_2x.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::epilogue::threadblock { using namespace cute; using namespace detail; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// // // N-nary Elementwise Compute Operation // ///////////////////////////////////////////////////////////////////////////////////////////////// template< template <class> class ComputeFn, class ElementOutput, class ElementCompute, FloatRoundStyle RoundStyle, class = void > struct VisitorCompute : VisitorImpl2x<> { using VisitorImpl2x<>::VisitorImpl2x; struct Callbacks : EmptyCallbacks { template <typename ElementAccumulator, typename... ElementInputs, int FragmentSize> CUTLASS_DEVICE Array<ElementOutput, FragmentSize> visit(int iter_idx, int row_idx, int column_idx, int frg_idx, Array<ElementAccumulator, FragmentSize> const& frg_acc, Array<ElementInputs, FragmentSize> const&... frg_inputs) { return transform_apply(cute::make_tuple(frg_inputs...), [&] (auto&& frg_input) { using ElementInput = typename cute::remove_cvref_t<decltype(frg_input)>::Element; using ConvertInput = NumericArrayConverter<ElementCompute, ElementInput, FragmentSize, RoundStyle>; ConvertInput convert_input{}; return convert_input(frg_input); }, [&] (auto&&... 
cvt_frg_inputs) { using ComputeOutput = ComputeFn<Array<ElementCompute, FragmentSize>>; using ConvertOutput = NumericArrayConverter<ElementOutput, ElementCompute, FragmentSize, RoundStyle>; ComputeOutput compute_output{}; ConvertOutput convert_output{}; return convert_output(compute_output(cvt_frg_inputs...)); } ); } }; template <class ProblemShape> CUTLASS_DEVICE auto get_callbacks( gemm::GemmCoord threadblock_tile_offset, int thread_idx, ProblemShape problem_shape ) { return Callbacks(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::epilogue::threadblock /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/epilogue/threadblock/fusion/visitor_compute.hpp/0
{ "file_path": "include/cutlass/epilogue/threadblock/fusion/visitor_compute.hpp", "repo_id": "include", "token_count": 1255 }
22
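VisitorCompute::Callbacks::visit() follows a convert-compute-convert pattern over fragments: each input fragment is converted to the compute type, the element-wise functor is applied across fragments, and the result is converted to the output type. The plain C++17 sketch below models that pattern with std::array in place of cutlass::Array and static_cast in place of NumericArrayConverter; visit_fragment and the example functor are illustrative names, not CUTLASS API.

#include <array>
#include <cstddef>

template <class ElementOutput, class ElementCompute, std::size_t N,
          class ComputeFn, class... ElementInputs>
std::array<ElementOutput, N>
visit_fragment(ComputeFn fn, std::array<ElementInputs, N> const&... frg_inputs) {
  std::array<ElementOutput, N> out{};
  for (std::size_t i = 0; i < N; ++i) {
    // Convert each input element to the compute type, apply the functor
    // element-wise across all fragments, then convert to the output type.
    out[i] = static_cast<ElementOutput>(
        fn(static_cast<ElementCompute>(frg_inputs[i])...));
  }
  return out;
}

// Usage sketch: a fused multiply-add over three fragments fa, fb, fc (hypothetical),
// computing in float and storing float outputs.
//   auto d = visit_fragment<float, float, 8>(
//       [](float a, float b, float c) { return a * b + c; }, fa, fb, fc);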
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/epilogue/threadblock/output_tile_thread_map.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Tile iterator used to load output tile from shared memory in epilogue. /// /// Satisfies: ReadableTileIterator /// template < typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap) typename Element_, ///< Element data type int MaxAlignment = ThreadMap_::kElementsPerAccess * sizeof_bits<Element_>::value / 8 > class SharedLoadIterator { public: using ThreadMap = ThreadMap_; using Shape = typename ThreadMap::TileShape; using Element = Element_; using Layout = layout::RowMajor; using TensorRef = TensorRef<Element, Layout>; using ConstTensorRef = typename TensorRef::ConstTensorRef; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = MatrixCoord; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; static int const kMinAlignment = ThreadMap_::kElementsPerAccess * sizeof_bits<Element_>::value / 8; static int const kAlignment = (MaxAlignment < kMinAlignment ? 
MaxAlignment : kMinAlignment); static int const kThreads = ThreadMap::kThreads; /// Fragment object using Fragment = Array< Element, ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>; /// Memory access size using AccessType = AlignedArray< Element, ThreadMap::kElementsPerAccess, kAlignment>; /// Vector type used for SMEM loads using LoadType = AlignedArray< Element, const_min(128 / sizeof_bits<Element>::value, ThreadMap::kElementsPerAccess), const_min(16, kAlignment) >; static int const kLoadsPerAccess = AccessType::kElements / LoadType::kElements; private: // // Data members // /// Byte-level pointer uint8_t *byte_pointer_; /// Stride along adjacent rows int stride_; public: // // Methods // /// Constructor CUTLASS_DEVICE SharedLoadIterator( TensorRef ref, int thread_idx ): byte_pointer_(reinterpret_cast<uint8_t *>(ref.data())), stride_((ref.stride(0) * sizeof_bits<Element>::value) / 8) { TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx); // Initialize pointer byte_pointer_ += thread_offset.row() * stride_ + thread_offset.column() * sizeof(AccessType) / kElementsPerAccess; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_DEVICE void add_tile_offset(TensorCoord const &offset) { byte_pointer_ += offset.row() * Shape::kRow * stride_ + offset.column() * Shape::kColumn * sizeof_bits<Element>::value / 8; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { uint8_t const *byte_pointer = byte_pointer_ + row * ThreadMap::Delta::kRow * stride_ + group * ThreadMap::Delta::kGroup* stride_ + cluster * ThreadMap::Delta::kCluster * stride_ + pointer_offset * sizeof_bits<Element>::value / 8; int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); LoadType *frag_ptr = reinterpret_cast<LoadType *>(&frag); LoadType const *memory_pointer = reinterpret_cast<LoadType const *>(byte_pointer); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kLoadsPerAccess; ++v) { frag_ptr[frag_idx * kLoadsPerAccess + v] = memory_pointer[(column * ThreadMap::Delta::kColumn / kElementsPerAccess) * kLoadsPerAccess + v]; } } } } } } /// Loads a fragment from memory CUTLASS_DEVICE void set_smem_base_address(Index address) { } /// Loads a fragment CUTLASS_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/epilogue/threadblock/shared_load_iterator.h/0
{ "file_path": "include/cutlass/epilogue/threadblock/shared_load_iterator.h", "repo_id": "include", "token_count": 2462 }
23
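SharedLoadIterator::load_with_pointer_offset advances a byte pointer by whole row strides per (row, group, cluster) step and indexes fragments row-major over (cluster, group, row, column). The stand-alone helpers below (illustrative, not CUTLASS code) restate that addressing scheme.

struct Iterations { int kColumn, kRow, kGroup, kCluster; };
struct Delta      { int kColumn, kRow, kGroup, kCluster; };

// Byte offset of one access relative to the thread's base pointer,
// given the shared-memory row stride in bytes.
inline long smem_byte_offset(int cluster, int group, int row,
                             Delta d, long stride_bytes) {
  return (long(row)     * d.kRow
        + long(group)   * d.kGroup
        + long(cluster) * d.kCluster) * stride_bytes;
}

// Flat index of one access within the Fragment array, matching
// frag_row_idx * Iterations::kColumn + column in the iterator.
inline int fragment_index(int cluster, int group, int row, int column, Iterations it) {
  int frag_row_idx = row + it.kRow * (group + it.kGroup * cluster);
  return frag_row_idx * it.kColumn + column;
}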
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic structures needed for implementing the warp-scoped phase of the epilogue. These quantities assume a 'column-major' arrangement of TensorOp instructions, of which a row-oriented slice is visible per iteration. 
*/ #pragma once #include "cutlass/matrix_shape.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/gemm.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Policy details related to the epilogue template < typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape) typename InterleavedTileShape, ///< shape of indivisible instruction-level arrangement (concept: GemmShape) typename ElementC, ///< Accumulator layout typename Layout ///< target shared memory layout > struct VoltaTensorOpPolicy; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major template < typename WarpShape_ ///< shape of warp-level GEMM (concept: GemmShape) > struct VoltaTensorOpPolicy<WarpShape_, gemm::GemmShape<32, 32, 4>, half_t, layout::RowMajor> { using WarpShape = WarpShape_; using InterleavedTileShape = gemm::GemmShape<32, 32, 4>; using ElementC = half_t; using Layout = layout::RowMajor; /// Shape of one warp-levelinstruction using InstructionShape = gemm::GemmShape<16, 16, 4>; /// Number of mma operations performed for one 32x32x4 interleaved tile using MmaIterations = MatrixShape< InterleavedTileShape::kM / InstructionShape::kM, InterleavedTileShape::kN / InstructionShape::kN >; /// Number of 32x32x4 interleaved tiles performed to cover the warp-level GEMM shape using TileIterations = MatrixShape< WarpShape::kM / InterleavedTileShape::kM, WarpShape::kN / InterleavedTileShape::kN >; /// Number of accumulator elements owned by each thread per Mma static int const kElementsPerMma = 8; static int const kRowsPerIteration = 16; // // Hard-coded constants regarding Tensor Operations // /// Number of accumulator elements stored per memory instruction to shared memory static int const kElementsPerAccess = 4; /// Number of accesses performed per interleaved tile static int const kAccessesPerInterleavedTile = 4; /// Total number of iterations needed to cover the entire tile static int const kIterations = TileIterations::kRow * 2; // // Derived types // /// Array type for aligned memory accesses using AccessType = AlignedArray<ElementC, kElementsPerAccess>; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< ElementC, kElementsPerAccess * kAccessesPerInterleavedTile * TileIterations::kColumn>; /// This is the complete warp-level accumulator tile. 
using AccumulatorTile = Array< ElementC, TileIterations::kCount * MmaIterations::kCount * kElementsPerMma>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major template < typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape) > struct VoltaTensorOpPolicy<WarpShape_, gemm::GemmShape<32, 32, 4>, float, layout::RowMajor> { using WarpShape = WarpShape_; using InterleavedTileShape = gemm::GemmShape<32, 32, 4>; using ElementC = float; using Layout = layout::RowMajor; /// Shape of one warp-levelinstruction using InstructionShape = gemm::GemmShape<16, 16, 4>; /// Number of mma operations performed for one 32x32x4 interleaved tile using MmaIterations = MatrixShape< InterleavedTileShape::kM / InstructionShape::kM, InterleavedTileShape::kN / InstructionShape::kN >; /// Number of 32x32x4 interleaved tiles performed to cover the warp-level GEMM shape using TileIterations = MatrixShape< WarpShape::kM / InterleavedTileShape::kM, WarpShape::kN / InterleavedTileShape::kN >; /// Number of accumulator elements owned by each thread per Mma static int const kElementsPerMma = 8; static int const kRowsPerIteration = 16; // // Hard-coded constants regarding Tensor Operations // /// Number of accumulator elements stored per memory instruction to shared memory static int const kElementsPerAccess = 2; /// Number of accesses performed per interleaved tile static int const kAccessesPerInterleavedTile = 8; /// Number of rows per interleaved tile static int const kRowsPerMmaTile = 2; /// Total number of iterations needed to cover the entire tile static int const kIterations = TileIterations::kRow * MmaIterations::kRow; // // Derived types // /// Array type for aligned memory accesses using AccessType = AlignedArray<ElementC, kElementsPerAccess>; /// This is the fragment size produced by one access of the iterator. using Fragment = Array< ElementC, kElementsPerAccess * kAccessesPerInterleavedTile * TileIterations::kColumn>; /// This is the complete warp-level accumulator tile. using AccumulatorTile = Array< ElementC, TileIterations::kCount * MmaIterations::kCount * kElementsPerMma>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/epilogue/warp/volta_tensor_op_policy.h/0
{ "file_path": "include/cutlass/epilogue/warp/volta_tensor_op_policy.h", "repo_id": "include", "token_count": 2098 }
24
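For a concrete 64x64 warp tile, the half_t row-major specialization of VoltaTensorOpPolicy above works out to 2x2 interleaved tiles, 32-element fragments per iterator access, and 128 accumulator elements per thread. The constexpr check below (illustrative, not part of CUTLASS) reproduces that arithmetic.

// 64x64 warp tile covered by 32x32x4 interleaved tiles of 16x16x4 MMAs,
// with 8 accumulator elements owned by each thread per MMA (half_t case).
constexpr int kWarpM = 64, kWarpN = 64;
constexpr int kTileItersRow = kWarpM / 32;   // 2
constexpr int kTileItersCol = kWarpN / 32;   // 2
constexpr int kMmaItersRow  = 32 / 16;       // 2
constexpr int kMmaItersCol  = 32 / 16;       // 2
constexpr int kElementsPerMma = 8;
constexpr int kElementsPerAccess = 4;             // half_t specialization
constexpr int kAccessesPerInterleavedTile = 4;    // half_t specialization

constexpr int kFragmentSize =
    kElementsPerAccess * kAccessesPerInterleavedTile * kTileItersCol;                   // 32
constexpr int kAccumulatorTileSize =
    (kTileItersRow * kTileItersCol) * (kMmaItersRow * kMmaItersCol) * kElementsPerMma;  // 128

static_assert(kFragmentSize == 32, "one iterator access yields 32 half_t values");
static_assert(kAccumulatorTileSize == 128, "each thread owns 128 accumulator elements");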
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/numeric_types.h" #include "cutlass/pipeline/pipeline.hpp" #include "cutlass/transform/collective/sm90_wgmma_transpose.hpp" #include "cutlass/trace.h" #include "cute/arch/cluster_sm90.hpp" #include "cute/arch/copy_sm90.hpp" #include "cute/algorithm/functional.hpp" #include "cute/atom/mma_atom.hpp" #include "cute/algorithm/gemm.hpp" #include "cute/tensor_predicate.hpp" #include "cute/numeric/arithmetic_tuple.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::collective { using namespace cute; ///////////////////////////////////////////////////////////////////////////////////////////////// // WarpSpecialized Mainloop template < int Stages, class ClusterShape_, class TileShape_, class KernelSchedule, class ElementA_, class StrideA_, class ElementB_, class StrideB_, class TiledMma_, class GmemTiledCopyA_, class SmemLayoutAtomA_, class SmemCopyAtomA_, class TransformA_, class GmemTiledCopyB_, class SmemLayoutAtomB_, class SmemCopyAtomB_, class TransformB_> struct CollectiveMma< MainloopSm90CpAsyncGmmaRmemAWarpSpecialized<Stages,ClusterShape_,KernelSchedule>, TileShape_, ElementA_, StrideA_, ElementB_, StrideB_, TiledMma_, GmemTiledCopyA_, SmemLayoutAtomA_, SmemCopyAtomA_, TransformA_, GmemTiledCopyB_, SmemLayoutAtomB_, SmemCopyAtomB_, TransformB_> { // // Type Aliases // using DispatchPolicy = MainloopSm90CpAsyncGmmaRmemAWarpSpecialized<Stages,ClusterShape_,KernelSchedule>; using TileShape = TileShape_; using ClusterShape = ClusterShape_; using ElementA = ElementA_; using StrideA = StrideA_; using ElementB = ElementB_; using StrideB = StrideB_; using 
TiledMma = TiledMma_; using ElementAccumulator = typename TiledMma::ValTypeC; using GmemTiledCopyA = GmemTiledCopyA_; using GmemTiledCopyB = GmemTiledCopyB_; using SmemLayoutAtomA = SmemLayoutAtomA_; using SmemLayoutAtomB = SmemLayoutAtomB_; using SmemCopyAtomA = SmemCopyAtomA_; using SmemCopyAtomB = SmemCopyAtomB_; using CtaShape_MNK = decltype(shape_div(TileShape{}, ClusterShape{})); // Swap and transpose A/B for A k-major layout and B mn-major layout since WGMMA is k-major only (e.g. tf32, Fp32, Int8, Fp8 WGMMA) static constexpr bool IsLayoutAkBmn = cute::is_same_v<gemm::detail::StrideToLayoutTagA_t<StrideA>, layout::RowMajor> && cute::is_same_v<gemm::detail::StrideToLayoutTagB_t<StrideB>, layout::RowMajor>; static constexpr bool IsInputSizeTwoBytes = sizeof(ElementA) == 2 && sizeof(ElementB) == 2; static constexpr bool SwapAB = !IsInputSizeTwoBytes && IsLayoutAkBmn; using InternalGmemTiledCopyA = cute::conditional_t<!SwapAB, GmemTiledCopyA, GmemTiledCopyB>; using InternalGmemTiledCopyB = cute::conditional_t<!SwapAB, GmemTiledCopyB, GmemTiledCopyA>; using InternalSmemLayoutAtomA = cute::conditional_t<!SwapAB, SmemLayoutAtomA, SmemLayoutAtomB>; using InternalSmemLayoutAtomB = cute::conditional_t<!SwapAB, SmemLayoutAtomB, SmemLayoutAtomA>; using InternalSmemCopyAtomA = cute::conditional_t<!SwapAB, SmemCopyAtomA, SmemCopyAtomB>; using InternalSmemCopyAtomB = cute::conditional_t<!SwapAB, SmemCopyAtomB, SmemCopyAtomA>; // TMA converts f32 input to tf32 when copying from GMEM to SMEM // For all other types, cast to size equivalent uint type to avoid any rounding by TMA. static constexpr bool ConvertF32toTF32A = cute::is_same_v<float, ElementA>; static constexpr bool ConvertF32toTF32B = cute::is_same_v<float, ElementB>; using ConvertedElementA = cute::conditional_t<ConvertF32toTF32A, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementA>>>; using ConvertedElementB = cute::conditional_t<ConvertF32toTF32B, tfloat32_t, uint_bit_t<sizeof_bits_v<ElementB>>>; using InternalElementA = cute::conditional_t<!SwapAB, ConvertedElementA, ConvertedElementB>; using InternalElementB = cute::conditional_t<!SwapAB, ConvertedElementB, ConvertedElementA>; using InternalStrideA = cute::conditional_t<!SwapAB, StrideA, StrideB>; using InternalStrideB = cute::conditional_t<!SwapAB, StrideB, StrideA>; using TransformA = TransformA_; using TransformB = TransformB_; using ArchTag = typename DispatchPolicy::ArchTag; using MainloopPipeline = cutlass::PipelineAsync<DispatchPolicy::Stages>; using PipelineState = typename MainloopPipeline::PipelineState; using PipelineParams = typename MainloopPipeline::Params; static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)"); static_assert((size<0>(TileShape{}) % size<0>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)"); static_assert((size<1>(TileShape{}) % size<0>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); static_assert((size<2>(TileShape{}) % size<1>(InternalSmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape."); using SmemLayoutA = decltype(tile_to_shape( InternalSmemLayoutAtomA{}, make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}))); using SmemLayoutB = decltype(tile_to_shape( 
InternalSmemLayoutAtomB{}, make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}))); // If A mn-layout and B mn-layout, transposing B matrix since WGMMA is k-major only (e.g. tf32, fp32, fp8, int8). static constexpr bool IsLayoutAmnBmn = cute::is_same_v<gemm::detail::StrideToLayoutTagA_t<StrideA>, layout::ColumnMajor> && cute::is_same_v<gemm::detail::StrideToLayoutTagB_t<StrideB>, layout::RowMajor>; static constexpr bool TransposeB = !IsInputSizeTwoBytes && IsLayoutAmnBmn; using TransposeOperandB = decltype(cutlass::transform::collective::detail::make_transpose_operand_b( 0, 0, TiledMma{}, SmemLayoutB{}, InternalSmemLayoutAtomB{}, InternalElementB{}, cute::bool_constant<TransposeB>{})); static_assert(DispatchPolicy::Stages >= 2, "Specialization requires Stages set to value 2 or more."); static_assert(not cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeA>::value && cute::is_base_of<cute::GMMA::DescriptorIterator, typename TiledMma::FrgTypeB>::value, "MMA atom must source A from rmem and B operand from smem_desc for this mainloop."); using GmmaSmemLayoutAtomB = decltype(transform::collective::detail::gmma_smem_transpose_or_passthrough< TransposeB, InternalSmemLayoutAtomB, InternalElementB>()); // SmemLayoutB for GMMA is different from SmemLayoutB for TMA if TransposeB using GmmaSmemLayoutB = decltype(tile_to_shape( GmmaSmemLayoutAtomB{}, make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{}))); static_assert(!SwapAB || !TransposeB, "Cannot SwapAB and TransposeB at the same time."); static_assert(TransposeB xor (cute::is_same_v<SmemLayoutB, GmmaSmemLayoutB>), "Should be same layout if not TransposeB."); static_assert(!TransposeB || (cutlass::bits_to_bytes(size<1>(SmemLayoutB{}) * sizeof_bits<InternalElementB>::value)) == 128, "SmemLayoutB K must be 128bytes to be transposed."); static_assert(!transform::collective::detail::use_universal_transposition<InternalSmemLayoutAtomB, InternalElementB>(), "Warp specialized ARF kernels have not supported universal B transposition yet."); struct SharedStorage { struct TensorStorage : cute::aligned_struct<256> { cute::array_aligned<typename TiledMma::ValTypeA, cute::cosize_v<SmemLayoutA>, 256> smem_A; cute::array_aligned<typename TiledMma::ValTypeB, cute::cosize_v<SmemLayoutB>, 256> smem_B; } tensors; using PipelineStorage = typename MainloopPipeline::SharedStorage; PipelineStorage pipeline; }; using TensorStorage = typename SharedStorage::TensorStorage; using PipelineStorage = typename SharedStorage::PipelineStorage; // Host side kernel arguments struct Arguments { ElementA const* ptr_A = nullptr; StrideA dA{}; ElementB const* ptr_B = nullptr; StrideB dB{}; uint32_t mma_promotion_interval = 4; }; // Device side kernel params struct Params { InternalElementA const* ptr_A = nullptr; InternalStrideA dA{}; InternalElementB const* ptr_B = nullptr; InternalStrideB dB{}; uint32_t mma_promotion_interval = 4; }; // // Methods // template <class ProblemShape> static constexpr Params to_underlying_arguments( [[maybe_unused]] ProblemShape const& problem_shape, Arguments const& args, [[maybe_unused]] void* workspace) { if constexpr (not SwapAB) { return { reinterpret_cast<InternalElementA const*>(args.ptr_A), args.dA, reinterpret_cast<InternalElementB const*>(args.ptr_B), args.dB }; } else { return { reinterpret_cast<InternalElementA const*>(args.ptr_B), args.dB, reinterpret_cast<InternalElementB const*>(args.ptr_A), args.dA }; } } template<class ProblemShape> static bool 
can_implement( ProblemShape const& problem_shape, [[maybe_unused]] Arguments const& args) { auto problem_shape_MNKL = append<4>(problem_shape, 1); auto [M,N,K,L] = problem_shape_MNKL; bool implementable = true; implementable = implementable && cutlass::detail::check_alignment<GmemTiledCopyA::NumValSrc>(cute::make_shape(M,K,L), StrideA{}); implementable = implementable && cutlass::detail::check_alignment<GmemTiledCopyB::NumValSrc>(cute::make_shape(N,K,L), StrideB{}); if (!implementable) { CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Problem Size doesn't meet the minimum alignment requirements for TMA.\n"); } return implementable; } static constexpr int K_PIPE_MAX = DispatchPolicy::Stages; static constexpr int K_PIPE_MMAS = 1; /// Perform a collective-scoped matrix multiply-accumulate /// Producer Perspective template < class TensorA, class TensorB, class KTileIterator, class ResidueMNK > CUTLASS_DEVICE void load( MainloopPipeline pipeline, PipelineState smem_pipe_write, TensorA const& gA_in, TensorB const& gB_in, KTileIterator k_tile_iter, int k_tile_count, ResidueMNK residue_mnk, int thread_idx, TensorStorage& shared_tensors) { using namespace cute; static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident."); static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident."); Tensor sA = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE) Tensor sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE) // Shift tensor so residue_k is at origin (Can't read any k_coord < residue_k) // This aligns the tensor with BLK_K for all but the 0th k_tile Tensor gA = domain_offset(make_coord(0, get<2>(residue_mnk), 0), gA_in); Tensor gB = domain_offset(make_coord(0, get<2>(residue_mnk), 0), gB_in); // Partition the copying of A and B tiles across the threads InternalGmemTiledCopyA gmem_tiled_copy_a; InternalGmemTiledCopyB gmem_tiled_copy_b; auto gmem_thr_copy_a = gmem_tiled_copy_a.get_slice(thread_idx); auto gmem_thr_copy_b = gmem_tiled_copy_b.get_slice(thread_idx); Tensor tAgA = gmem_thr_copy_a.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k) Tensor tAsA = gmem_thr_copy_a.partition_D(sA); // (ACPY,ACPY_M,ACPY_K,PIPE) Tensor tBgB = gmem_thr_copy_b.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k) Tensor tBsB = gmem_thr_copy_b.partition_D(sB); // (BCPY,BCPY_N,BCPY_K,PIPE) // Allocate predicate tensors for m and n Tensor tApA = make_tensor<bool>(make_shape(size<1>(tAsA), size<2>(tAsA)), Stride<_1,_0>{}); Tensor tBpB = make_tensor<bool>(make_shape(size<1>(tBsB), size<2>(tBsB)), Stride<_1,_0>{}); // Construct identity layout for sA and sB Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k) Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k) // Repeat the partitioning with identity layouts Tensor tAcA = gmem_thr_copy_a.partition_S(cA); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k) Tensor tBcB = gmem_thr_copy_b.partition_S(cB); // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k) // Set predicates for m bounds CUTLASS_PRAGMA_UNROLL for (int m = 0; m < size<0>(tApA); ++m) { tApA(m,0) = get<0>(tAcA(0,m,0)) < get<0>(residue_mnk); // blk_m coord < residue_m } // Set predicates for n bounds CUTLASS_PRAGMA_UNROLL for (int n = 0; n < size<0>(tBpB); ++n) { tBpB(n,0) = get<0>(tBcB(0,n,0)) < get<1>(residue_mnk); // blk_n coord < residue_n } // 0-th stage with predication on k to account for residue { // LOCK smem_pipe_write for _writing_ 
pipeline.producer_acquire(smem_pipe_write); int write_stage = smem_pipe_write.index(); // Copy gmem to smem for *k_tile_iter, predicating for k residue Tensor tAgAk = tAgA(_,_,_,*k_tile_iter); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < size<2>(tAsA); ++k) { if (get<1>(tAcA(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gA shifted) copy_if(gmem_tiled_copy_a, tApA(_,k), tAgAk(_,_,k), tAsA(_,_,k,write_stage)); } else { clear(tAsA(_,_,k,write_stage)); } } Tensor tBgBk = tBgB(_,_,_,*k_tile_iter); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < size<2>(tBsB); ++k) { if (get<1>(tBcB(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gB shifted) copy_if(gmem_tiled_copy_b, tBpB(_,k), tBgBk(_,_,k), tBsB(_,_,k,write_stage)); } else { clear(tBsB(_,_,k,write_stage)); } } ++k_tile_iter; --k_tile_count; // UNLOCK smem_pipe_write pipeline.producer_commit(smem_pipe_write, cutlass::arch::cpasync_barrier_arrive); // Advance smem_pipe_write ++smem_pipe_write; } // Mainloop CUTLASS_PRAGMA_NO_UNROLL for ( ; k_tile_count > 0; --k_tile_count) { // LOCK smem_pipe_write for _writing_ pipeline.producer_acquire(smem_pipe_write); int write_stage = smem_pipe_write.index(); // Copy gmem to smem for *k_tile_iter copy_if(gmem_tiled_copy_a, tApA, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,write_stage)); copy_if(gmem_tiled_copy_b, tBpB, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,write_stage)); ++k_tile_iter; // UNLOCK smem_pipe_write pipeline.producer_commit(smem_pipe_write, cutlass::arch::cpasync_barrier_arrive); // Advance smem_pipe_write ++smem_pipe_write; } } /// Perform a Producer Epilogue to prevent early exit of blocks in a Cluster CUTLASS_DEVICE void load_tail( MainloopPipeline pipeline, PipelineState smem_pipe_write) { // Issue the epilogue waits /* This helps avoid early exit of blocks in Cluster * Waits for all stages to either be released (all * Consumer UNLOCKs), or if the stage was never used * then would just be acquired since the phase was * still inverted from make_producer_start_state */ pipeline.producer_tail(smem_pipe_write); } /// Perform a collective-scoped matrix multiply-accumulate /// Consumer Perspective template < class FrgTensorC > CUTLASS_DEVICE void mma(MainloopPipeline pipeline, PipelineState smem_pipe_read, FrgTensorC& accum, int k_tile_count, int thread_idx, TensorStorage& shared_tensors, Params const& mainloop_params) { using namespace cute; static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident."); static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3."); static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3."); static_assert(cute::rank(InternalSmemLayoutAtomA{}) == 2, "InternalSmemLayoutAtomA must be rank 2."); static_assert(cute::rank(InternalSmemLayoutAtomB{}) == 2, "InternalSmemLayoutAtomB must be rank 2."); static_assert(!cute::is_void_v<InternalSmemCopyAtomA>, "SM90 GMMA mainloops must specify a non-void copy atom for smem sourced instructions."); static_assert(cute::is_void_v<InternalSmemCopyAtomB>, "SM90 GMMA mainloops cannot have a non-void copy atom for smem sourced instructions."); // Obtain warp index int warp_idx = canonical_warp_idx_sync(); [[maybe_unused]] int warp_group_thread_idx = thread_idx % 128; Tensor sA_ = make_tensor(make_smem_ptr(shared_tensors.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE) Tensor sA = as_position_independent_swizzle_tensor(sA_); // (BLK_M,BLK_K,PIPE) Tensor sB_ = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE) Tensor sB = 
as_position_independent_swizzle_tensor(sB_); // (BLK_M,BLK_K,PIPE) // If TransposeB, GMMA will read from transposed B layout SMEM Tensor gmma_sB = make_tensor(make_smem_ptr(shared_tensors.smem_B.data()), GmmaSmemLayoutB{}); // (BLK_N,BLK_K,PIPE) // // Define C accumulators and A/B partitioning // // Layout of warp group to thread mapping static_assert(stride<0>(typename TiledMma::BLayout{}) == 0 and size<0>(typename TiledMma::BLayout{}) == NumThreadsPerWarpGroup, "Stride of the first mode must be 0 and the size of the mode must be NumThreadsPerWarpGroup"); constexpr int MmaWarpGroups = size(TiledMma{}) / NumThreadsPerWarpGroup; Layout warp_group_thread_layout = make_layout(Int<MmaWarpGroups>{}, Int<NumThreadsPerWarpGroup>{}); int warp_group_idx = __shfl_sync(0xFFFFFFFF, thread_idx / NumThreadsPerWarpGroup, 0); TiledMma tiled_mma; auto mma_thread_slice = tiled_mma.get_thread_slice(thread_idx); auto mma_warpgroup_slice = tiled_mma.get_slice(warp_group_thread_layout(warp_group_idx)); // Allocate fragments and descriptors Tensor tCsA = mma_thread_slice.partition_A(sA); Tensor tCrA = mma_thread_slice.partition_fragment_A(sA(_,_,Int<0>{})); // (MMA,MMA_M,MMA_K,PIPE) Tensor tCsB = mma_warpgroup_slice.partition_B(gmma_sB); // (MMA,MMA_N,MMA_K,PIPE) Tensor tCrB = mma_warpgroup_slice.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE) // // Copy Atom A retiling // auto smem_tiled_copy_A = make_tiled_copy_A(InternalSmemCopyAtomA{}, tiled_mma); auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(thread_idx); Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); // (CPY,CPY_M,CPY_K) CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<2>(accum)); // N CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCsB)); // K CUTE_STATIC_ASSERT_V(size<3>(tCsA) == size<3>(tCsB)); // PIPE CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE // // PIPELINED MAIN LOOP // static_assert((0 <= K_PIPE_MMAS) && (K_PIPE_MMAS < K_PIPE_MAX), "ERROR : Incorrect number of MMAs in flight"); // We release buffers to producer warps(dma load) with some mmas in flight PipelineState smem_pipe_release = smem_pipe_read; tiled_mma.accumulate_ = GMMA::ScaleOut::Zero; TransposeOperandB transpose = cutlass::transform::collective::detail::make_transpose_operand_b( warp_idx, warp_group_thread_idx, tiled_mma, SmemLayoutB{}, InternalSmemLayoutAtomB{}, InternalElementB{}, cute::bool_constant<TransposeB>{}); warpgroup_fence_operand(accum); // first k tile { pipeline.consumer_wait(smem_pipe_read); int read_stage = smem_pipe_read.index(); ++smem_pipe_read; bool skip_wait = (pipeline.consumer_try_wait(smem_pipe_read) == BarrierStatus::WaitDone); // copy smem->rmem for A operand copy(smem_tiled_copy_A, tCsA(_,_,0,read_stage), tCrA_copy_view(_,_,0)); // transpose B operand in SMEM transpose(sB, gmma_sB, read_stage, 0); // Unroll the K mode manually to set scale D to 1 CUTLASS_PRAGMA_UNROLL for (int k_block = 0; k_block < size<2>(tCrA) - 1; ++k_block) { copy(smem_tiled_copy_A, tCsA(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1)); if (k_block == 0) { transpose(sB, gmma_sB, read_stage, 1); transpose.synchronize(); } warpgroup_arrive(); // (V,M) x (V,N) => (V,M,N) cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum); 
tiled_mma.accumulate_ = GMMA::ScaleOut::One; warpgroup_commit_batch(); } warpgroup_wait<2>(); if (k_tile_count - 1 > 0) { if (!skip_wait) { pipeline.consumer_wait(smem_pipe_read); } copy(smem_tiled_copy_A, tCsA(_,_,0,smem_pipe_read.index()), tCrA_copy_view(_,_,0)); transpose(sB, gmma_sB, smem_pipe_read.index(), 0); } warpgroup_arrive(); // (V,M) x (V,N) => (V,M,N) cute::gemm(tiled_mma, tCrA(_,_,size<2>(tCrA) - 1), tCrB(_,_,size<2>(tCrA) - 1,read_stage), accum); tiled_mma.accumulate_ = GMMA::ScaleOut::One; warpgroup_commit_batch(); warpgroup_wait<2>(); } warpgroup_fence_operand(accum); // Mainloop GMMAs --k_tile_count; CUTLASS_PRAGMA_NO_UNROLL for ( ; k_tile_count > 1; --k_tile_count) { // // Compute on k_tile // int read_stage = smem_pipe_read.index(); ++smem_pipe_read; bool skip_wait = (pipeline.consumer_try_wait(smem_pipe_read) == BarrierStatus::WaitDone); warpgroup_fence_operand(accum); // Unroll the K mode manually to set scale D to 1 CUTLASS_PRAGMA_UNROLL for (int k_block = 0; k_block < size<2>(tCrA); ++k_block) { if (k_block == size<2>(tCrA) - 1) { if (!skip_wait) { pipeline.consumer_wait(smem_pipe_read); } copy(smem_tiled_copy_A, tCsA(_,_,0,smem_pipe_read.index()), tCrA_copy_view(_,_,0)); // transpose B operand in SMEM transpose(sB, gmma_sB, smem_pipe_read.index(), 0); } else { copy(smem_tiled_copy_A, tCsA(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1)); // transpose B operand in SMEM if (k_block < 2) { transpose.synchronize(k_block); // make transpose of k_block available } if (k_block == 0) { transpose(sB, gmma_sB, read_stage, 1); } } warpgroup_arrive(); // (V,M) x (V,N) => (V,M,N) cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum); tiled_mma.accumulate_ = GMMA::ScaleOut::One; warpgroup_commit_batch(); warpgroup_wait<2>(); if (k_block == 1) { // release prior barrier pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it ++smem_pipe_release; } } warpgroup_fence_operand(accum); } warpgroup_fence_operand(accum); if (k_tile_count > 0) { // // Compute on k_tile // int read_stage = smem_pipe_read.index(); warpgroup_fence_operand(accum); // Unroll the K mode manually to set scale D to 1 CUTLASS_PRAGMA_UNROLL for (int k_block = 0; k_block < size<2>(tCrA) - 1; ++k_block) { copy(smem_tiled_copy_A, tCsA(_,_,k_block + 1,read_stage), tCrA_copy_view(_,_,k_block + 1)); if (k_block < 2) { transpose.synchronize(k_block); // make k_block transpose available } if (k_block == 0) { transpose(sB, gmma_sB, read_stage, 1); } warpgroup_arrive(); // (V,M) x (V,N) => (V,M,N) cute::gemm(tiled_mma, tCrA(_,_,k_block), tCrB(_,_,k_block,read_stage), accum); tiled_mma.accumulate_ = GMMA::ScaleOut::One; warpgroup_commit_batch(); warpgroup_wait<2>(); if (k_block == 1) { // release prior barrier pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it ++smem_pipe_release; } } warpgroup_arrive(); // (V,M) x (V,N) => (V,M,N) cute::gemm(tiled_mma, tCrA(_,_,size<2>(tCrA) - 1), tCrB(_,_,size<2>(tCrA) - 1,read_stage), accum); tiled_mma.accumulate_ = GMMA::ScaleOut::One; warpgroup_commit_batch(); warpgroup_wait<2>(); warpgroup_fence_operand(accum); } warpgroup_fence_operand(accum); } /// Perform a Consumer Epilogue to release all buffers CUTLASS_DEVICE void mma_tail(MainloopPipeline pipeline, PipelineState smem_pipe_release, int k_tile_count) { // Prologue GMMAs int prologue_mma_count = min(K_PIPE_MMAS, k_tile_count); k_tile_count -= prologue_mma_count; smem_pipe_release.advance(k_tile_count); // 
Wait on all GMMAs to complete warpgroup_wait<0>(); for (int count = 0; count < prologue_mma_count; ++count) { pipeline.consumer_release(smem_pipe_release); // UNLOCK smem_pipe_release, done _computing_ on it ++smem_pipe_release; } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::collective /////////////////////////////////////////////////////////////////////////////////////////////////
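/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Illustrative sketch (not part of the mainloop above): the collective picks one of three data
// paths at compile time -- SwapAB, TransposeB, or neither -- from only the operand width and the
// A/B layout tags (IsInputSizeTwoBytes, IsLayoutAkBmn, IsLayoutAmnBmn). The standalone host
// program below reproduces that decision table with plain enums so it can be checked in
// isolation. The names LayoutTag, PathDecision and pick_path are hypothetical and exist only for
// this sketch.
//
/////////////////////////////////////////////////////////////////////////////////////////////////

#include <cstdio>

enum class LayoutTag { RowMajor, ColumnMajor };

struct PathDecision {
  bool swap_ab;      // swap and transpose A/B so WGMMA still sees a k-major B operand
  bool transpose_b;  // explicitly transpose B tiles in shared memory instead
};

// Mirrors the IsInputSizeTwoBytes / IsLayoutAkBmn / IsLayoutAmnBmn logic of the collective.
constexpr PathDecision pick_path(int bytes_a, int bytes_b, LayoutTag tag_a, LayoutTag tag_b) {
  bool two_byte_inputs = (bytes_a == 2) && (bytes_b == 2);
  bool a_k_major_b_mn  = (tag_a == LayoutTag::RowMajor)    && (tag_b == LayoutTag::RowMajor);
  bool a_mn_b_mn       = (tag_a == LayoutTag::ColumnMajor) && (tag_b == LayoutTag::RowMajor);
  return PathDecision{!two_byte_inputs && a_k_major_b_mn,    // SwapAB
                      !two_byte_inputs && a_mn_b_mn};        // TransposeB
}

int main() {
  // 4-byte inputs (e.g. float/tf32) with A k-major and B mn-major: operands are swapped.
  constexpr PathDecision tf32_swap = pick_path(4, 4, LayoutTag::RowMajor, LayoutTag::RowMajor);
  static_assert(tf32_swap.swap_ab && !tf32_swap.transpose_b, "expected the SwapAB path");

  // 4-byte inputs with A mn-major and B mn-major: B is transposed in shared memory.
  constexpr PathDecision tf32_tr = pick_path(4, 4, LayoutTag::ColumnMajor, LayoutTag::RowMajor);
  static_assert(!tf32_tr.swap_ab && tf32_tr.transpose_b, "expected the TransposeB path");

  // 2-byte inputs (e.g. fp16/bf16) need neither fix-up; WGMMA accepts their mn-major smem layouts.
  constexpr PathDecision halfs = pick_path(2, 2, LayoutTag::RowMajor, LayoutTag::RowMajor);
  static_assert(!halfs.swap_ab && !halfs.transpose_b, "expected the direct path");

  std::printf("SwapAB / TransposeB decision table verified at compile time\n");
  return 0;
}

/////////////////////////////////////////////////////////////////////////////////////////////////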
include/cutlass/gemm/collective/sm90_mma_multistage_gmma_rs_warpspecialized.hpp/0
{ "file_path": "include/cutlass/gemm/collective/sm90_mma_multistage_gmma_rs_warpspecialized.hpp", "repo_id": "include", "token_count": 12968 }
25
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/gemm/kernel/sparse_gemm.h" #include "cutlass/gemm/kernel/default_gemm_sparse.h" #include "cutlass/gemm/device/default_gemm_configuration.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// /*! Gemm device-level operator. This is an interface to efficient CUTLASS GEMM kernels that may be invoked from host code. The contributions of this class are: 1. At compile time, it maps data types and high-level structural parameters onto specific CUTLASS components. 2. At runtime, it maps logical arguments to GEMM problems to kernel parameters. 3. At runtime, it launches kernels on the device. The intent is to provide a convenient mechanism for interacting with most plausible GEMM configurations for each supported architecture. Consequently, not all parameters are exposed to the top-level interface. Rather, sensible defaults at each level of the CUTLASS hierarchy are selected to tradeoff simplicity of the interface with flexibility. We expect most configurations to be specified at this level. Applications with more exotic requirements may construct their kernels of interest using CUTLASS components at the threadblock, warp, and thread levels of abstraction. 
CUTLASS exposes computations using the functor design pattern in which objects compose some internal state with an overloaded function call operator. This enables decoupling of initialization from execution, possibly reducing overhead during steady state phases of application execution. CUTLASS device-level operators expose an Arguments structure encompassing each logical input to the computation. This is distinct from the kernel-level Params structure pattern which contains application-specific precomputed state needed by the device code. Example of a CUTLASS GEMM operator implementing the functionality of cuBLAS's SGEMM NN is as follows: // // Instantiate the CUTLASS GEMM operator. // cutlass::gemm::device::Gemm< float, cutlass::layout::ColumnMajor, float, cutlass::layout::ColumnMajor, float, cutlass::layout::ColumnMajor > gemm_op; // // Launch the GEMM operation on the device // cutlass::Status status = gemm_op({ {m, n, k}, // GemmCoord problem_size, {A, lda}, // TensorRef<float, layout::ColumnMajor> ref_A, {B, ldb}, // TensorRef<float, layout::ColumnMajor> ref_B, {C, ldc}, // TensorRef<float, layout::ColumnMajor> ref_C, {D, ldd}, // TensorRef<float, layout::ColumnMajor> ref_D, {alpha, beta} // EpilogueOutputOp::Params epilogue_op_params }); A simplified view of the template is listed below. template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Element type for C and D matrix operands typename ElementC, /// Layout type for C and D matrix operands typename LayoutC, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for. This is the minimum SM that /// supports the intended feature. The device kernel can be built /// targeting any SM larger than this number. 
typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages > class Gemm; */ template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Element type for internal accumulation typename ElementAccumulator_ = ElementC_, /// Operator class tag typename OperatorClass_ = arch::OpClassSimt, /// Tag indicating architecture to tune for typename ArchTag_ = arch::Sm70, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::InstructionShape, /// Epilogue output operator typename EpilogueOutputOp_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_ = typename threadblock::GemmIdentityThreadblockSwizzle<>, /// Number of stages used in the pipelined mainloop int Stages = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kStages, /// Access granularity of A matrix in units of elements int AlignmentA = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentA, /// Access granularity of B matrix in units of elements int AlignmentB = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentB, /// If true, kernel supports split-K with serial reduction bool SplitKSerial = false, /// Operation performed by GEMM typename Operator_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::Operator> class SparseGemm { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using TensorRefA = TensorRef<ElementA const, LayoutA>; using ElementB = ElementB_; using LayoutB = LayoutB_; using TensorRefB = TensorRef<ElementB const, LayoutB>; using ElementC = ElementC_; using LayoutC = LayoutC_; using TensorRefC = TensorRef<ElementC const, LayoutC>; using TensorRefD = TensorRef<ElementC, LayoutC>; using ElementAccumulator = ElementAccumulator_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator 
= Operator_; using MathOperator = Operator; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentB = AlignmentB; static int const kAlignmentC = EpilogueOutputOp::kCount; static bool const kSplitKSerial = SplitKSerial; static ComplexTransform const kTransformA = ComplexTransform::kNone; static ComplexTransform const kTransformB = ComplexTransform::kNone; /// Define the kernel using GemmKernel = typename kernel::DefaultSparseGemm< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, kStages, kSplitKSerial, Operator >::GemmKernel; using ElementE = typename GemmKernel::ElementE; using LayoutE = typename GemmKernel::LayoutE; static int const kAlignmentE = 128 / sizeof_bits<ElementE>::value; static int const kSparse = GemmKernel::kSparse; static int const kMetaSizeInBits = GemmKernel::kMetaSizeInBits; static int const kElementsPerElementE = GemmKernel::kElementsPerElementE; /// Argument structure struct Arguments { // // Data members // GemmCoord problem_size; TensorRef<ElementA const, LayoutA> ref_A; TensorRef<ElementB const, LayoutB> ref_B; TensorRef<ElementC const, LayoutC> ref_C; TensorRef<ElementC, LayoutC> ref_D; TensorRef<ElementE const, LayoutE> ref_E; typename EpilogueOutputOp::Params epilogue; int split_k_slices; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE Arguments(): problem_size(0, 0, 0), split_k_slices(1) { } /// Constructs an Arguments structure CUTLASS_HOST_DEVICE Arguments( GemmCoord problem_size_, TensorRef<ElementA const, LayoutA> ref_A_, TensorRef<ElementB const, LayoutB> ref_B_, TensorRef<ElementC const, LayoutC> ref_C_, TensorRef<ElementC, LayoutC> ref_D_, TensorRef<ElementE, LayoutE> ref_E_, typename EpilogueOutputOp::Params epilogue_ = typename EpilogueOutputOp::Params(), int split_k_slices = 1 ): problem_size(problem_size_), ref_A(ref_A_), ref_B(ref_B_), ref_C(ref_C_), ref_D(ref_D_), ref_E(ref_E_), epilogue(epilogue_), split_k_slices(split_k_slices) { } }; private: /// Kernel parameters object typename GemmKernel::Params params_; public: /// Constructs the GEMM. SparseGemm() { } /// Determines whether the GEMM can execute the given problem. static Status can_implement(Arguments const &args) { if (!kSplitKSerial && args.split_k_slices > 1) { return Status::kErrorInvalidProblem; } Status status = GemmKernel::can_implement( args.problem_size, args.ref_A.non_const_ref(), args.ref_B.non_const_ref(), args.ref_C.non_const_ref(), args.ref_D, args.ref_E.non_const_ref() ); if (status != Status::kSuccess) { return status; } return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { size_t bytes = 0; // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape( args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.split_k_slices); if (kSplitKSerial && args.split_k_slices > 1) { bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n()); } return bytes; } /// Initializes GEMM state from arguments. 
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape( args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.split_k_slices); if (kSplitKSerial) { if (args.split_k_slices > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } size_t bytes = get_workspace_size(args); cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream); if (result != cudaSuccess) { return Status::kErrorInternal; } } } else { if (args.split_k_slices > 1) { return Status::kErrorInvalidProblem; } } // Initialize the Params structure params_ = typename GemmKernel::Params{ args.problem_size, grid_shape, args.ref_A.non_const_ref(), args.ref_B.non_const_ref(), args.ref_C.non_const_ref(), args.ref_D, args.ref_E.non_const_ref(), args.epilogue, static_cast<int *>(workspace) }; int smem_size = int(sizeof(typename GemmKernel::SharedStorage)); if (smem_size >= (48 << 10)) { cudaError_t result = cudaFuncSetAttribute(Kernel<GemmKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } return Status::kSuccess; } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { if (kSplitKSerial && args.split_k_slices > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } } params_.ref_A.reset(args.ref_A.non_const_ref().data()); params_.ref_B.reset(args.ref_B.non_const_ref().data()); params_.ref_C.reset(args.ref_C.non_const_ref().data()); params_.ref_D.reset(args.ref_D.data()); params_.ref_E.reset(args.ref_E.non_const_ref().data()); params_.output_op = args.epilogue; params_.semaphore = static_cast<int *>(workspace); return Status::kSuccess; } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(GemmKernel::kThreadCount, 1, 1); int smem_size = int(sizeof(typename GemmKernel::SharedStorage)); cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_); cudaError_t result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; } // namespace device } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
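/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Illustrative host-side sketch (not part of this header): constructing and launching the
// SparseGemm operator defined above, in the same spirit as the usage example shown in the file's
// documentation comment. The SM80 tensor-op configuration is loosely modeled on the public
// CUTLASS 2:4 structured-sparsity examples; whether a given type/layout combination is supported
// is ultimately decided by kernel::DefaultSparseGemm. The device pointers, leading dimensions,
// and the reordered metadata tensor E are assumed to be prepared by the caller.
//
/////////////////////////////////////////////////////////////////////////////////////////////////

#include "cutlass/gemm/device/gemm_sparse.h"

using SparseGemmOp = cutlass::gemm::device::SparseGemm<
    cutlass::half_t, cutlass::layout::RowMajor,     // A (compressed to k / kSparse columns)
    cutlass::half_t, cutlass::layout::ColumnMajor,  // B
    cutlass::half_t, cutlass::layout::RowMajor,     // C and D
    float,                                          // internal accumulation
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80>;

cutlass::Status run_sparse_gemm(
    int m, int n, int k,
    cutlass::half_t const *A, int lda,              // compressed A operand
    cutlass::half_t const *B, int ldb,
    cutlass::half_t const *C, int ldc,
    cutlass::half_t *D, int ldd,
    SparseGemmOp::ElementE *E, int lde,             // reordered 2:4 sparsity metadata
    float alpha, float beta) {

  SparseGemmOp gemm_op;

  // Arguments mirror the structure defined above: problem size, A/B/C/D/E tensor references,
  // epilogue scalars, and the serial split-K slice count.
  SparseGemmOp::Arguments args{
      {m, n, k},
      {A, lda}, {B, ldb}, {C, ldc}, {D, ldd}, {E, lde},
      {alpha, beta},
      /* split_k_slices = */ 1};

  cutlass::Status status = SparseGemmOp::can_implement(args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // No workspace is required when split_k_slices == 1 (see get_workspace_size() above).
  return gemm_op(args, /* workspace = */ nullptr, /* stream = */ nullptr);
}

/////////////////////////////////////////////////////////////////////////////////////////////////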
include/cutlass/gemm/device/gemm_sparse.h/0
{ "file_path": "include/cutlass/gemm/device/gemm_sparse.h", "repo_id": "include", "token_count": 6306 }
26
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined RankK kernel. Does not compute batching or support split-K. 
*/ #pragma once #include "cutlass/blas3.h" #include "cutlass/arch/arch.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/gemm/kernel/rank_k_universal.h" #include "cutlass/gemm/kernel/default_rank_k_universal.h" #include "cutlass/gemm/device/default_gemm_configuration.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC, /// Element type for internal accumulation typename ElementAccumulator_ = ElementC_, /// Operator class tag typename OperatorClass_ = arch::OpClassTensorOp, /// Tag indicating architecture to tune for typename ArchTag_ = arch::Sm80, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::InstructionShape, /// Epilogue output operator typename EpilogueOutputOp_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_ = typename threadblock::GemmIdentityThreadblockSwizzle<>, /// Number of stages used in the pipelined mainloop int Stages = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::kStages, /// Access granularity of A matrix in units of elements int AlignmentA = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::kAlignmentA, /// If true, kernel supports split-K with serial reduction bool SplitKSerial = false, /// Operation performed by SYRK typename Operator_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_, ElementAccumulator_>::Operator, /// Complex elementwise transformation ComplexTransform TransformA = ComplexTransform::kNone, /// Blas3 computation mode (symmetric/hermitian) BlasMode BlasMode_ = BlasMode::kSymmetric> class RankK { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using ElementC = ElementC_; using LayoutC = LayoutC_; using ElementAccumulator = ElementAccumulator_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; static FillMode const kFillModeC = FillModeC; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentC = EpilogueOutputOp::kCount; static 
bool const kSplitKSerial = SplitKSerial; static ComplexTransform const kTransformA = TransformA; static BlasMode const kBlasMode = BlasMode_; static int const kUpdateRank = 1; /// Define the kernel using RankKkernel = typename kernel::DefaultRankKUniversal< ElementA, LayoutA, kTransformA, kAlignmentA, ElementC, LayoutC, kFillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, kStages, kSplitKSerial, Operator, kBlasMode >::RankKkernel; using Arguments = typename RankKkernel::Arguments; private: /// Kernel parameters object typename RankKkernel::Params params_; public: /// Constructs the SYRK. RankK() { } /// Determines whether the SYRK can execute the given problem. static Status can_implement(Arguments const &args) { if (!kSplitKSerial && args.batch_count > 1) { return Status::kErrorInvalidProblem; } Status status = RankKkernel::can_implement(args); if (FillModeC != FillMode::kLower && FillModeC != FillMode::kUpper) { return Status::kErrorInvalidProblem; } if (status != Status::kSuccess) { return status; } return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { size_t bytes = 0; // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape( args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.batch_count); if (kSplitKSerial && args.batch_count > 1) { bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n()); } return bytes; } /// Initializes SYRK state from arguments. Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape( args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.batch_count); if (kSplitKSerial) { if (args.batch_count > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } size_t bytes = get_workspace_size(args); cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream); if (result != cudaSuccess) { return Status::kErrorInternal; } } } else { if (args.batch_count > 1) { return Status::kErrorInvalidProblem; } } int gemm_k_size = args.problem_size.k(); // Initialize the Params structure params_ = typename RankKkernel::Params{ args, grid_tiled_shape, gemm_k_size, static_cast<int *>(workspace) }; int smem_size = int(sizeof(typename RankKkernel::SharedStorage)); if (smem_size >= (48 << 10)) { cudaError_t result = cudaFuncSetAttribute(Kernel<RankKkernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } return Status::kSuccess; } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { if (kSplitKSerial && args.batch_count > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } } size_t workspace_bytes = get_workspace_size(args); if (workspace_bytes && !workspace) { return Status::kErrorWorkspaceNull; } params_.update(args, workspace); return Status::kSuccess; } /// Runs the kernel using initialized state. 
Status run(cudaStream_t stream = nullptr) { ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(RankKkernel::kThreadCount, 1, 1); int smem_size = int(sizeof(typename RankKkernel::SharedStorage)); cutlass::Kernel<RankKkernel><<<grid, block, smem_size, stream>>>(params_); cudaError_t result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace); if (status == Status::kSuccess) { status = run(stream); } return status; } }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for column-major output exchange operand. template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for C and D matrix operands typename ElementC_, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC, /// Element type for internal accumulation typename ElementAccumulator_, /// Operator class tag typename OperatorClass_, /// Tag indicating architecture to tune for. This is the minimum SM that /// supports the intended feature. The device kernel can be built /// targeting any SM larger than this number. typename ArchTag_, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_, /// Warp-level tile size (concept: GemmShape) typename WarpShape_, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_, /// Epilogue output operator typename EpilogueOutputOp_, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_, /// Number of stages used in the pipelined mainloop int Stages, /// Access granularity of A matrix in units of elements int AlignmentA, /// If true, kernel supports split-K with serial reduction bool SplitKSerial, /// Operation performed by RankK update kernel typename Operator_, /// Complex elementwise transformation ComplexTransform TransformA, /// Blas3 computation mode (symmetric/hermitian) BlasMode BlasMode_ > class RankK<ElementA_, LayoutA_, ElementC_, layout::ColumnMajor, // partially specialized on LayoutC FillModeC, ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, AlignmentA, SplitKSerial, Operator_, TransformA, BlasMode_> { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using ElementC = ElementC_; using LayoutC = layout::ColumnMajor; using ElementAccumulator = ElementAccumulator_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; static FillMode const kFillModeC = FillModeC; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentC = EpilogueOutputOp::kCount; static bool const kSplitKSerial = SplitKSerial; static BlasMode const kBlasMode = BlasMode_; static int const kUpdateRank = 1; // Complex transform for input A matrices (function on input layout) static ComplexTransform const 
kTransformA = TransformA; /// Define the kernel using UnderlyingOperator = typename cutlass::gemm::device::RankK< ElementA, LayoutA, ElementC, layout::RowMajor, InvertFillMode<FillModeC>::mode, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, kStages, kAlignmentA, kSplitKSerial, Operator, kTransformA, kBlasMode >; /// Argument structure using Arguments = typename UnderlyingOperator::Arguments; using RankKkernel = typename UnderlyingOperator::RankKkernel; private: UnderlyingOperator underlying_operator_; public: /// Constructs the RankK. RankK() { } /// Helper to construct a transposed equivalent for the underying RankK operator static Arguments to_underlying_arguments(Arguments const &args) { return args; } /// Determines whether the RankK can execute the given problem. static Status can_implement(Arguments const &args) { return UnderlyingOperator::can_implement(to_underlying_arguments(args)); } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args)); } /// Computes the grid shape static dim3 get_grid_shape(Arguments const &args) { return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args)); } /// Computes the maximum number of active blocks per multiprocessor static int maximum_active_blocks(int smem_capacity = -1) { return UnderlyingOperator::maximum_active_blocks(smem_capacity); } /// Initializes RankK state from arguments. Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream); } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { return underlying_operator_.update(to_underlying_arguments(args), workspace); } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { return underlying_operator_.run(stream); } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace RankK } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
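/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Illustrative sketch (not part of this header): the column-major partial specialization above
// simply forwards its arguments to a row-major RankK operator with the fill mode inverted
// (InvertFillMode). The standalone host program below demonstrates the storage identity that
// makes this legal: writing the lower triangle of a column-major C touches exactly the offsets
// that the upper triangle of a row-major matrix with the same leading dimension would touch, and
// for a symmetric/Hermitian rank-k update those mirrored elements hold the same values. The
// helper names column_major_offset and row_major_offset are hypothetical.
//
/////////////////////////////////////////////////////////////////////////////////////////////////

#include <cassert>
#include <cstdio>

// Offset of element (row, col) in a column-major matrix with leading dimension ld.
constexpr long column_major_offset(long row, long col, long ld) { return row + col * ld; }

// Offset of element (row, col) in a row-major matrix with leading dimension ld.
constexpr long row_major_offset(long row, long col, long ld) { return row * ld + col; }

int main() {
  constexpr long n = 8, ld = 8;
  for (long i = 0; i < n; ++i) {
    for (long j = 0; j <= i; ++j) {   // (i, j) ranges over the lower triangle of C
      // The kLower column-major element (i, j) and the kUpper row-major element (j, i)
      // occupy the same linear offset, so the inverted-fill row-major kernel writes the
      // same bytes the column-major problem asked for.
      assert(column_major_offset(i, j, ld) == row_major_offset(j, i, ld));
    }
  }
  std::printf("column-major kLower output matches row-major kUpper output of the mirrored problem\n");
  return 0;
}

/////////////////////////////////////////////////////////////////////////////////////////////////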
include/cutlass/gemm/device/rank_k.h/0
{ "file_path": "include/cutlass/gemm/device/rank_k.h", "repo_id": "include", "token_count": 5606 }
27
/*************************************************************************************************** * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Gemm kernel with an epilogue that computes the absolute maximum value of the output and a pre-activation-function auxiliary output. The auxiliary output is also (optionally) stored to global memory. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/layout/layout.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/semaphore.h" #include "cutlass/gemm/kernel/params_universal_base.h" #include "cutlass/trace.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// // Gemm that computes the absolute maximum value of the output and a pre-activation-function // auxiliary output. template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function > struct GemmWithAbsMax { public: using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; static ComplexTransform const kTransformA = Mma::kTransformA; static ComplexTransform const kTransformB = Mma::kTransformB; using Operator = typename Mma::Operator; using OperatorClass = typename Mma::Operator::OperatorClass; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename Mma::Operator::Shape; using InstructionShape = typename Mma::Policy::Operator::InstructionShape; using ArchTag = typename Mma::ArchTag; static int const kStages = Mma::kStages; static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Split-K preserves splits that are 128b aligned static int const kSplitKAlignment = const_max( 128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value ); // // Structures // /// Argument structure struct Arguments : UniversalArgumentsBase { // // Data members // typename EpilogueOutputOp::Params epilogue; void const * ptr_A; void const * ptr_B; void const * ptr_C; void * ptr_D; void * ptr_Aux; void * ptr_Vector; int64_t batch_stride_A; int64_t batch_stride_B; int64_t batch_stride_C; int64_t batch_stride_Vector; typename LayoutA::Stride::Index lda; typename LayoutB::Stride::Index ldb; typename LayoutC::Stride::Index ldc; typename LayoutC::Stride::Index ldd; typename LayoutC::Stride::Index ldaux; typename LayoutC::Stride::Index ldr; // // Methods // Arguments(): ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr), ptr_Aux(nullptr) {} /// Constructs an arguments structure with ldaux Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_count, typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_B, void const * ptr_C, void * ptr_D, void * ptr_Aux, void * ptr_Vector, int64_t batch_stride_A, int64_t batch_stride_B, int64_t batch_stride_C, int64_t batch_stride_D, int64_t batch_stride_Vector, typename LayoutA::Stride::Index lda, typename LayoutB::Stride::Index ldb, typename LayoutC::Stride::Index ldc, typename LayoutC::Stride::Index ldd, typename LayoutC::Stride::Index ldr, typename LayoutC::Stride::Index ldaux) : UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), epilogue(epilogue), ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), ptr_Aux(ptr_Aux), ptr_Vector(ptr_Vector), batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_Vector(batch_stride_Vector), lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ldaux(ldaux), ldr(ldr) { } /// Constructs an Arguments structure without ldaux. /// These parameters are overridden with D batch stride and ldd. 
Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_count, typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_B, void const * ptr_C, void * ptr_D, void * ptr_Aux, void * ptr_Vector, int64_t batch_stride_A, int64_t batch_stride_B, int64_t batch_stride_C, int64_t batch_stride_D, int64_t batch_stride_Vector, typename LayoutA::Stride::Index lda, typename LayoutB::Stride::Index ldb, typename LayoutC::Stride::Index ldc, typename LayoutC::Stride::Index ldd, typename LayoutC::Stride::Index ldr) : Arguments(mode, problem_size, batch_count, epilogue, ptr_A, ptr_B, ptr_C, ptr_D, ptr_Aux, ptr_Vector, batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D, batch_stride_Vector, lda, ldb, ldc, ldd, ldr, ldd) { } /// Returns arguments for the transposed problem Arguments transposed_problem() const { Arguments args(*this); std::swap(args.problem_size.m(), args.problem_size.n()); std::swap(args.ptr_A, args.ptr_B); std::swap(args.lda, args.ldb); std::swap(args.batch_stride_A, args.batch_stride_B); return args; } }; // // Structure for precomputing values in host memory and passing to kernels // /// Parameters structure struct Params : UniversalParamsBase< ThreadblockSwizzle, ThreadblockShape, ElementA, ElementB, ElementC, LayoutA, LayoutB> { using ParamsBase = UniversalParamsBase< ThreadblockSwizzle, ThreadblockShape, ElementA, ElementB, ElementC, LayoutA, LayoutB>; // // Data members // typename Mma::IteratorA::Params params_A; typename Mma::IteratorB::Params params_B; typename Epilogue::OutputTileIterator::Params params_C; typename Epilogue::OutputTileIterator::Params params_D; typename Epilogue::AuxOutputTileIterator::Params params_Aux; typename EpilogueOutputOp::Params output_op; void * ptr_A; void * ptr_B; void * ptr_C; void * ptr_D; void * ptr_Aux; void * ptr_Vector; typename LayoutC::Stride::Index ldr; int64_t batch_stride_A; int64_t batch_stride_B; int64_t batch_stride_C; int64_t batch_stride_Vector; // // Host dispatch API // /// Default constructor Params() = default; /// Constructor Params( Arguments const &args, /// GEMM application arguments int device_sms, /// Number of SMs on the device int sm_occupancy) /// Kernel SM occupancy (in thread blocks) : ParamsBase(args, device_sms, sm_occupancy), params_A(args.lda), params_B(args.ldb), params_C(args.ldc), params_D(args.ldd), params_Aux(args.ldaux), output_op(args.epilogue), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_B)), ptr_C(const_cast<void *>(args.ptr_C)), ptr_D(args.ptr_D), ptr_Aux(args.ptr_Aux), ptr_Vector(args.ptr_Vector), ldr(args.ldr), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_B), batch_stride_C(args.batch_stride_C), batch_stride_Vector(args.batch_stride_Vector) { } /// Lightweight update given a subset of arguments. 
CUTLASS_HOST_DEVICE void update(Arguments const &args) { ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_B); ptr_C = const_cast<void *>(args.ptr_C); ptr_D = args.ptr_D; ptr_Aux = args.ptr_Aux; ptr_Vector = args.ptr_Vector; ldr = args.ldr; batch_stride_A = args.batch_stride_A; batch_stride_B = args.batch_stride_B; batch_stride_C = args.batch_stride_C; this->batch_stride_D = args.batch_stride_D; batch_stride_Vector = args.batch_stride_Vector; output_op = args.epilogue; } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; public: // // Host dispatch API // /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size) { static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; bool isAMisaligned = false; bool isBMisaligned = false; bool isCMisaligned = false; if (platform::is_same<LayoutA, layout::RowMajor>::value) { isAMisaligned = problem_size.k() % kAlignmentA; } else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) { isAMisaligned = problem_size.m() % kAlignmentA; } else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value || platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) { isAMisaligned = problem_size.k() % kAlignmentA; } if (platform::is_same<LayoutB, layout::RowMajor>::value) { isBMisaligned = problem_size.n() % kAlignmentB; } else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) { isBMisaligned = problem_size.k() % kAlignmentB; } else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value || platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) { isBMisaligned = problem_size.k() % kAlignmentB; } if (platform::is_same<LayoutC, layout::RowMajor>::value) { isCMisaligned = problem_size.n() % kAlignmentC; } else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) { isCMisaligned = problem_size.m() % kAlignmentC; } else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value || platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) { isCMisaligned = problem_size.n() % kAlignmentC; } if (isAMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand"); return Status::kErrorMisalignedOperand; } if (isBMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand"); return Status::kErrorMisalignedOperand; } if (isCMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand"); return Status::kErrorMisalignedOperand; } CUTLASS_TRACE_HOST(" returning kSuccess"); return Status::kSuccess; } static Status can_implement(Arguments const &args) { return can_implement(args.problem_size); } public: // // Device-only API // // Factory invocation CUTLASS_DEVICE static void invoke( Params const &params, SharedStorage &shared_storage) { GemmWithAbsMax op; op(params, shared_storage); } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= 
threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } int offset_k = 0; int problem_size_k = params.problem_size.k(); ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // // Fetch pointers based on mode. // if (params.mode == GemmUniversalMode::kGemm || params.mode == GemmUniversalMode::kGemmSplitKParallel) { if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; } offset_k = threadblock_tile_offset.k() * params.gemm_k_size; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; } else if (params.mode == GemmUniversalMode::kArray) { ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()]; ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()]; } __syncthreads(); // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, offset_k, }; cutlass::MatrixCoord tb_offset_B{ offset_k, threadblock_tile_offset.n() * Mma::Shape::kN }; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.params_A, ptr_A, {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( params.params_B, ptr_B, {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; // Compute threadblock-scoped matrix multiply-add mma( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.n() * Mma::Shape::kN ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); typename Epilogue::ElementAuxOutput *ptr_Aux = static_cast<typename Epilogue::ElementAuxOutput *>(params.ptr_Aux); typename Epilogue::ElementVector *ptr_Vector = static_cast<typename Epilogue::ElementVector *>(params.ptr_Vector); // // Fetch pointers based on mode. // // // Special path when split-K not enabled. // if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() == 1) { // Tile iterators loading from source tensors. typename Epilogue::OutputTileIterator iterator_C( params.params_C, ptr_C, params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to destination tensor. 
typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to auxiliary tensor. typename Epilogue::AuxOutputTileIterator iterator_Aux( params.params_Aux, ptr_Aux, params.problem_size.mn(), thread_idx, threadblock_offset ); // Construct the epilogue Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Move to appropriate location for this output tile if (ptr_Vector) { ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr; } // Execute the epilogue operator to update the destination tensor. epilogue(output_op, ptr_Vector, iterator_D, accumulators, iterator_C, iterator_Aux, params.problem_size.mn(), threadblock_offset); return; } // // Slower path when split-K or batching is needed // // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); if (params.mode == GemmUniversalMode::kGemm) { // If performing a reduction via split-K, fetch the initial synchronization if (params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } } else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; if (ptr_Aux) { ptr_Aux += threadblock_tile_offset.k() * params.batch_stride_D; } if (ptr_Vector) { ptr_Vector += threadblock_tile_offset.k() * params.batch_stride_Vector; } } else if (params.mode == GemmUniversalMode::kArray) { ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()]; ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()]; if (ptr_Aux) { ptr_Aux = static_cast<typename Epilogue::ElementAuxOutput * const *>(params.ptr_Aux)[threadblock_tile_offset.k()]; } if (ptr_Vector) { ptr_Vector = static_cast<typename Epilogue::ElementVector * const *>(params.ptr_Vector)[threadblock_tile_offset.k()]; } } // Tile iterators loading from source tensors. typename Epilogue::OutputTileIterator iterator_C( params.params_C, ptr_C, params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to auxiliary destination tensor. typename Epilogue::AuxOutputTileIterator iterator_Aux( params.params_Aux, // Only the final block writes the auxiliary tensor ((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) && (params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1)) ? nullptr : ptr_Aux, params.problem_size.mn(), thread_idx, threadblock_offset ); // Construct the epilogue Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. 
if (threadblock_tile_offset.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_offset.k()); } // Move to appropriate location for this output tile if (ptr_Vector) { ptr_Vector += threadblock_offset.column() + threadblock_tile_offset.m() * params.ldr; } // Execute the epilogue operator to update the destination tensor. epilogue(output_op, // Only the final block uses Vector ((params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) && (params.grid_tiled_shape.k() != threadblock_tile_offset.k() + 1)) ? nullptr : ptr_Vector, iterator_D, accumulators, iterator_C, iterator_Aux, params.problem_size.mn(), threadblock_offset); // // Release the semaphore // if ((params.mode == GemmUniversalMode::kGemm) && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/gemm/kernel/gemm_with_absmax.h/0
{ "file_path": "include/cutlass/gemm/kernel/gemm_with_absmax.h", "repo_id": "include", "token_count": 9516 }
28
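The split-K serial path in the kernel above coordinates the k-partition threadblocks of one output tile through a per-tile Semaphore: partition k waits until the lock holds the value k, accumulates into D (reading its predecessor's result back through iterator_C), and then releases either k+1 or 0 for the final partition. The standalone CUDA sketch below (not CUTLASS code; all names such as splitk_accumulate are illustrative) restates that handshake with plain atomics. It assumes the spinning blocks do not starve earlier partitions of SM resources; the real kernel issues the wait only after the main loop, so most of the latency is hidden behind useful work.

#include <cuda_runtime.h>

// One block per (tile, k-partition): gridDim.x = num_tiles, gridDim.y = k_partitions.
__global__ void splitk_accumulate(float const* partial,    // [k_partitions][num_tiles * tile_elems] partial sums
                                  float*       d,          // [num_tiles * tile_elems] running / final output
                                  int*         semaphore,  // [num_tiles], initialized to 0
                                  int          tile_elems,
                                  int          k_partitions) {
  int tile  = blockIdx.x;
  int k     = blockIdx.y;
  int* lock = semaphore + tile;

  // Wait until it is this partition's turn: partition k waits for the value k
  // (mirrors semaphore.fetch() / semaphore.wait(threadblock_tile_offset.k())).
  if (threadIdx.x == 0) {
    while (atomicAdd(lock, 0) != k) { /* spin */ }
  }
  __syncthreads();
  __threadfence();  // order our reads of D after the observed hand-off

  float const* src = partial + (size_t(k) * gridDim.x + tile) * tile_elems;
  float*       dst = d + size_t(tile) * tile_elems;
  for (int i = threadIdx.x; i < tile_elems; i += blockDim.x) {
    // For partitions after the first, the running sum is held in D -- the
    // kernel above swaps iterator_C for iterator_D in the same situation.
    dst[i] = (k == 0 ? 0.0f : dst[i]) + src[i];
  }

  __threadfence();  // publish this partition's update before handing off
  __syncthreads();

  // The final partition resets the lock for subsequent launches; every other
  // partition passes the token to partition k+1 -- mirroring semaphore.release(lock).
  if (threadIdx.x == 0) {
    atomicExch(lock, (k + 1 == k_partitions) ? 0 : k + 1);
  }
}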
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/kernel_hardware_info.hpp" #include "cute/arch/cluster_sm90.hpp" #include "cutlass/arch/reg_reconfig.h" #include "cutlass/arch/mma_sm90.h" #include "cutlass/epilogue/collective/detail.hpp" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" #include "cutlass/pipeline/pipeline.hpp" #include "cutlass/trace.h" #include "cute/tensor.hpp" /////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::kernel { /////////////////////////////////////////////////////////////////////////////// template < class ProblemShape_, class CollectiveMainloop_, class CollectiveEpilogue_, class TileScheduler_ > class GemmUniversal< ProblemShape_, CollectiveMainloop_, CollectiveEpilogue_, TileScheduler_, cute::enable_if_t<cute::is_base_of_v<KernelTmaWarpSpecialized, typename CollectiveMainloop_::DispatchPolicy::Schedule>>> { public: // // Type Aliases // using ProblemShape = ProblemShape_; static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4, "ProblemShape{} should be <M,N,K> or <M,N,K,L>"); // Mainloop derived types using CollectiveMainloop = CollectiveMainloop_; using TileShape = typename CollectiveMainloop::TileShape; using TiledMma = typename CollectiveMainloop::TiledMma; using ArchTag = typename CollectiveMainloop::ArchTag; using ElementA = typename CollectiveMainloop::ElementA; using StrideA = typename CollectiveMainloop::StrideA; using ElementB = typename CollectiveMainloop::ElementB; using StrideB = typename CollectiveMainloop::StrideB; using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; 
using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; using ClusterShape = typename DispatchPolicy::ClusterShape; using MainloopArguments = typename CollectiveMainloop::Arguments; using MainloopParams = typename CollectiveMainloop::Params; static_assert(ArchTag::kMinComputeCapability >= 90); // Epilogue derived types using CollectiveEpilogue = CollectiveEpilogue_; using ElementC = typename CollectiveEpilogue::ElementC; using StrideC = typename CollectiveEpilogue::StrideC; using ElementD = typename CollectiveEpilogue::ElementD; using StrideD = typename CollectiveEpilogue::StrideD; using EpilogueArguments = typename CollectiveEpilogue::Arguments; using EpilogueParams = typename CollectiveEpilogue::Params; static_assert(cute::is_void_v<TileScheduler_> or cute::is_same_v<TileScheduler_, PersistentScheduler>, "TMA warp-specialized kernel does not support specializing the tile scheduler."); using TileSchedulerTag = TileScheduler_; using TileScheduler = typename detail::TileSchedulerSelector< TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; using TileSchedulerArguments = typename TileScheduler::Arguments; // Kernel level shared memory storage struct SharedStorage { // Mainloop and epilogue don't use smem concurrently since kernel is non-persistent, so we can use a union union TensorStorage { using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage; using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage; MainloopTensorStorage mainloop; EpilogueTensorStorage epilogue; } tensors; struct PipelineStorage : cute::aligned_struct<16> { using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage; using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage; alignas(16) MainloopPipelineStorage mainloop; alignas(16) EpiLoadPipelineStorage epi_load; } pipelines; }; static constexpr int SharedStorageSize = sizeof(SharedStorage); static constexpr uint32_t NumLoadWarpGroups = 1; static constexpr uint32_t NumMmaWarpGroups = 1; static constexpr uint32_t MaxThreadsPerBlock = CUTE_STATIC_V(size(TiledMma{})) + (NumLoadWarpGroups * NumThreadsPerWarpGroup); static constexpr uint32_t MinBlocksPerMultiprocessor = 1; // Device side arguments struct Arguments { GemmUniversalMode mode{}; ProblemShape problem_shape{}; MainloopArguments mainloop{}; EpilogueArguments epilogue{}; KernelHardwareInfo hw_info{}; TileSchedulerArguments scheduler{}; }; // Kernel entry point API struct Params { GemmUniversalMode mode{}; ProblemShape problem_shape{}; MainloopParams mainloop{}; EpilogueParams epilogue{}; }; // // Methods // // Convert to underlying arguments. In this case, a simple copy for the aliased type. 
static Params to_underlying_arguments(Arguments const& args, void* workspace) { (void) workspace; auto problem_shape = args.problem_shape; if constexpr (detail::Has_SwapAB_v<CollectiveMainloop>) { // swap M/N get<0>(problem_shape) = get<1>(args.problem_shape); get<1>(problem_shape) = get<0>(args.problem_shape); } return { args.mode, problem_shape, CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace), CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace) }; } static bool can_implement(Arguments const& args) { bool implementable = (args.mode == GemmUniversalMode::kGemm) or (args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4); if (!implementable) { CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); return implementable; } implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); implementable &= TileScheduler::can_implement(args.scheduler); return implementable; } static size_t get_workspace_size(Arguments const& args) { return 0; } static cutlass::Status initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr, CudaHostAdapter* cuda_adapter = nullptr) { return Status::kSuccess; } // Computes the kernel launch grid shape based on runtime parameters static dim3 get_grid_shape(Params const& params) { auto cluster_shape = ClusterShape{}; auto tile_shape = TileShape{}; auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); return TileScheduler::get_tiled_cta_shape_mnl( problem_shape_MNKL, tile_shape, cluster_shape); } static dim3 get_block_shape() { return dim3(MaxThreadsPerBlock, 1, 1); } CUTLASS_DEVICE void operator()(Params const& params, char* smem_buf) { using namespace cute; using X = Underscore; #if defined(__CUDA_ARCH_FEAT_SM90_ALL) # define ENABLE_SM90_KERNEL_LEVEL 1 #endif // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. #if ! defined(ENABLE_SM90_KERNEL_LEVEL) printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. 
Aborting.\n"); #else enum class WarpGroupRole { Producer = 0, Consumer = 1, }; enum class ProducerWarpRole { MainloopEpilogue = 0, Warp1 = 1, Warp2 = 2, Warp3 = 3 }; // Kernel level shared memory storage SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(smem_buf); int thread_idx = int(threadIdx.x); int lane_idx = canonical_lane_idx(); int warp_idx = canonical_warp_idx_sync(); int warp_idx_in_warp_group = warp_idx % NumWarpsPerWarpGroup; int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; auto warp_group_role = WarpGroupRole(canonical_warp_group_idx()); auto producer_warp_role = ProducerWarpRole(warp_idx_in_warp_group); int lane_predicate = cute::elect_one_sync(); uint32_t block_rank_in_cluster = cute::block_rank_in_cluster(); // Issue Tma Descriptor Prefetch from a single thread if ((warp_idx == 0) && lane_predicate) { CollectiveMainloop::prefetch_tma_descriptors(params.mainloop); CollectiveEpilogue::prefetch_tma_descriptors(params.epilogue); } // Mainloop Load pipeline using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline; typename MainloopPipeline::Params mainloop_pipeline_params; if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::MainloopEpilogue) { mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; } if (warp_group_role == WarpGroupRole::Consumer) { mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; } mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0; mainloop_pipeline_params.num_consumers = NumThreadsPerWarpGroup; mainloop_pipeline_params.transaction_bytes = params.mainloop.tma_transaction_bytes; MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params, ClusterShape{}); // Epilogue Load pipeline using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline; typename EpiLoadPipeline::Params epi_load_pipeline_params; if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::MainloopEpilogue) { epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer; } if (warp_group_role == WarpGroupRole::Consumer) { epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer; } epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster(); epi_load_pipeline_params.producer_arv_count = NumThreadsPerWarp; epi_load_pipeline_params.consumer_arv_count = NumThreadsPerWarpGroup; if constexpr (CollectiveEpilogue::RequiresTransactionBytes) { epi_load_pipeline_params.transaction_bytes = params.epilogue.tma_transaction_bytes; } EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params); // Epilogue Store pipeline using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline; typename EpiStorePipeline::Params epi_store_pipeline_params; epi_store_pipeline_params.always_wait = true; EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params); // Initialize starting pipeline states for the collectives // Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding) typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state; typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state; // For the DMA Load (producer) we start with an opposite phase // i.e., we skip all waits since we know that the buffer is indeed empty PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state<MainloopPipeline>(); PipelineState epi_load_pipe_producer_state = 
cutlass::make_producer_start_state<EpiLoadPipeline>(); PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state<EpiStorePipeline>(); auto cluster_wait_fn = [&] () { // We need this to guarantee that the Pipeline init is visible // To all producers and consumer thread blocks in the Cluster if constexpr (size(ClusterShape{}) > 1) { cute::cluster_arrive_relaxed(); return [] () { cute::cluster_wait(); }; } else { __syncthreads(); return [] () {}; // do nothing } } (); // Preconditions static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); // Optionally append 1s until problem shape is rank-4 in case it is only rank-3 (MNK) auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); // Get the appropriate blocks for this thread block -- potential for thread block locality auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) TiledMma tiled_mma; // In a warp specialized kernel, collectives expose data movement and compute operations separately CollectiveMainloop collective_mainloop; CollectiveEpilogue collective_epilogue(params.epilogue, shared_storage.tensors.epilogue); // Prepare and partition the input tensors. Expects a tuple of tensors where: // get<0>(load_inputs) is the tma tensor A after local tiling so that it has shape (BLK_M,BLK_K,m,k,l) // get<1>(load_inputs) is the tma tensor B after local tiling so that it has shape (BLK_N,BLK_K,n,k,l) auto load_inputs = collective_mainloop.load_init(problem_shape_MNKL, params.mainloop); static_assert(cute::tuple_size_v<decltype(load_inputs)> >= 2, "Output of load_init must have at least two elements (A, B)"); // Extract out partitioned A and B. 
Tensor gA_mkl = get<0>(load_inputs); Tensor gB_nkl = get<1>(load_inputs); // Compute m_coord, n_coord, and l_coord with their post-tiled shapes auto m_coord = idx2crd(int(blockIdx.x), shape<2>(gA_mkl)); auto n_coord = idx2crd(int(blockIdx.y), shape<2>(gB_nkl)); auto l_coord = idx2crd(int(blockIdx.z), shape<4>(gB_nkl)); auto blk_coord = make_coord(m_coord, n_coord, _, l_coord); // Get pipeline iterators and increments from tensor shapes auto k_tile_iter = cute::make_coord_iterator(shape<3>(gA_mkl)); auto k_tile_count = size<3>(gA_mkl); // Wait for all thread blocks in the Cluster cluster_wait_fn(); if (warp_group_role == WarpGroupRole::Producer) { if (producer_warp_role == ProducerWarpRole::MainloopEpilogue) { collective_mainloop.load( params.mainloop, mainloop_pipeline, mainloop_pipe_producer_state, load_inputs, blk_coord, k_tile_iter, k_tile_count, lane_idx, block_rank_in_cluster, shared_storage.tensors.mainloop ); // Update starting mainloop pipeline state for the pipeline drain mainloop_pipe_producer_state.advance(k_tile_count); // Make sure mainloop consumer has been waited upon before issuing epilogue load collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state); if (collective_epilogue.is_producer_load_needed()) { // Ensure warp is converged before issuing epilogue loads __syncwarp(); epi_load_pipe_producer_state = collective_epilogue.load( epi_load_pipeline, epi_load_pipe_producer_state, problem_shape_MNKL, blk_shape, blk_coord, tiled_mma, lane_idx, shared_storage.tensors.epilogue ); collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state); } } } else if (warp_group_role == WarpGroupRole::Consumer) { Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) collective_mainloop.mma( mainloop_pipeline, mainloop_pipe_consumer_state, accumulators, k_tile_count, warp_group_thread_idx, shared_storage.tensors.mainloop, params.mainloop ); // Make sure the math instructions are done and free buffers before entering the epilogue collective_mainloop.mma_tail( mainloop_pipeline, mainloop_pipe_consumer_state, k_tile_count ); // Epilogue and write to gD auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] = collective_epilogue.store( epi_load_pipeline, epi_load_pipe_consumer_state, epi_store_pipeline, epi_store_pipe_producer_state, problem_shape_MNKL, blk_shape, blk_coord, accumulators, tiled_mma, warp_group_thread_idx, shared_storage.tensors.epilogue ); collective_epilogue.store_tail( epi_load_pipeline, epi_load_pipe_consumer_state_next, epi_store_pipeline, epi_store_pipe_producer_state_next ); } #endif } }; /////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::kernel
include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp/0
{ "file_path": "include/cutlass/gemm/kernel/sm90_gemm_tma_warpspecialized.hpp", "repo_id": "include", "token_count": 6864 }
29
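The warp-specialized kernel above splits its thread block (256 threads when there is one MMA warp group alongside the single load warp group) into a producer warp group that drives the TMA loads and a consumer warp group that issues the MMAs and the epilogue, selecting roles purely from the flat thread index. The host-side sketch below is illustrative only; the constants and enum names simply mirror the kernel's WarpGroupRole / ProducerWarpRole logic so the index arithmetic can be compiled and checked in isolation.

#include <cassert>
#include <cstdio>

namespace sketch {

constexpr int kThreadsPerWarp      = 32;
constexpr int kWarpsPerWarpGroup   = 4;
constexpr int kThreadsPerWarpGroup = kThreadsPerWarp * kWarpsPerWarpGroup;  // 128

enum class WarpGroupRole    { Producer = 0, Consumer = 1 };
enum class ProducerWarpRole { MainloopEpilogue = 0, Warp1 = 1, Warp2 = 2, Warp3 = 3 };

struct ThreadRoles {
  WarpGroupRole    group_role;
  ProducerWarpRole producer_role;  // only meaningful when group_role == Producer
};

// Same arithmetic as canonical_warp_idx / canonical_warp_group_idx in the kernel.
inline ThreadRoles classify(int thread_idx) {
  int warp_idx               = thread_idx / kThreadsPerWarp;
  int warp_idx_in_warp_group = warp_idx % kWarpsPerWarpGroup;
  int warp_group_idx         = thread_idx / kThreadsPerWarpGroup;
  return { WarpGroupRole(warp_group_idx), ProducerWarpRole(warp_idx_in_warp_group) };
}

} // namespace sketch

int main() {
  using namespace sketch;
  // Thread 0 sits in the producer warp group, warp 0 (mainloop/epilogue TMA loads).
  assert(classify(0).group_role == WarpGroupRole::Producer);
  assert(classify(0).producer_role == ProducerWarpRole::MainloopEpilogue);
  // Thread 128 is the first thread of the consumer (MMA) warp group.
  assert(classify(128).group_role == WarpGroupRole::Consumer);
  std::printf("role classification ok\n");
  return 0;
}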
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/blas3.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/semaphore.h" #include "cutlass/core_io.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_, ///! Threadblock swizzling function SideMode SideMode_, ///! Side Mode for the kernel (kLeft or kRight) FillMode FillMode_, ///! Fill Mode for triangular matrix (kLower or kUpper) DiagType DiagType_ ///! 
Diag Type for triangular matrix (kNonUnit or kUnit) > struct TrmmUniversal { public: using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; static SideMode const kSideMode = SideMode_; static FillMode const kFillMode = FillMode_; static DiagType const kDiagType = DiagType_; static ComplexTransform const kTransformA = Mma::kTransformA; static ComplexTransform const kTransformB = Mma::kTransformB; using Operator = typename Mma::Operator; using OperatorClass = typename Mma::Operator::OperatorClass; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename Mma::Operator::Shape; using InstructionShape = typename Mma::Policy::Operator::InstructionShape; using ArchTag = typename Mma::ArchTag; static int const kStages = Mma::kStages; static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Split-K preserves splits that are 128b aligned static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value); // // Structures // /// Argument structure struct Arguments { // // Data members // GemmUniversalMode mode{GemmUniversalMode::kGemm}; GemmCoord problem_size{}; int batch_count{1}; typename EpilogueOutputOp::Params epilogue{}; void const * ptr_A{nullptr}; void const * ptr_B{nullptr}; void * ptr_D{nullptr}; int64_t batch_stride_A{0}; int64_t batch_stride_B{0}; int64_t batch_stride_D{0}; typename LayoutA::Stride::Index lda{0}; typename LayoutB::Stride::Index ldb{0}; typename LayoutC::Stride::Index ldd{0}; // // Methods // Arguments() = default; /// constructs an arguments structure Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_count, typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_B, void * ptr_D, int64_t batch_stride_A, int64_t batch_stride_B, int64_t batch_stride_D, typename LayoutA::Stride::Index lda, typename LayoutB::Stride::Index ldb, typename LayoutC::Stride::Index ldd ): mode(mode), problem_size(problem_size), batch_count(batch_count), epilogue(epilogue), ptr_A(ptr_A), ptr_B(ptr_B), ptr_D(ptr_D), batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_D(batch_stride_D), lda(lda), ldb(ldb), ldd(ldd) { } /// Returns arguments for the transposed problem sizes Arguments transposed_problem_size() const { Arguments args(*this); std::swap(args.problem_size.m(), args.problem_size.n()); return args; } /// Returns arguments for the transposed matrices Arguments swapped_matrices() const { Arguments args(*this); std::swap(args.ptr_A, args.ptr_B); std::swap(args.lda, args.ldb); std::swap(args.batch_stride_A, args.batch_stride_B); return args; } }; // // Structure for precomputing values in host memory and passing to kernels // /// Parameters structure struct Params { cutlass::gemm::GemmCoord problem_size{}; cutlass::gemm::GemmCoord grid_tiled_shape{}; int 
swizzle_log_tile{0}; typename Mma::IteratorA::Params params_A{}; typename Mma::IteratorB::Params params_B{}; typename Epilogue::OutputTileIterator::Params params_D{}; typename EpilogueOutputOp::Params output_op{}; GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm; int batch_count {0}; int gemm_k_size {0}; void * ptr_A{nullptr}; void * ptr_B{nullptr}; void * ptr_D{nullptr}; int64_t batch_stride_A {0}; int64_t batch_stride_B {0}; int64_t batch_stride_D {0}; int *semaphore{nullptr}; // // Methods // Params() = default; CUTLASS_HOST_DEVICE Params( Arguments const &args, cutlass::gemm::GemmCoord const & grid_tiled_shape, int gemm_k_size, void *workspace = nullptr ): problem_size(args.problem_size), grid_tiled_shape(grid_tiled_shape), swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)), params_A(args.lda), params_B(args.ldb), params_D(args.ldd), output_op(args.epilogue), mode(args.mode), batch_count(args.batch_count), gemm_k_size(gemm_k_size), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_B)), ptr_D(args.ptr_D), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_B), batch_stride_D(args.batch_stride_D), semaphore(static_cast<int *>(workspace)) { } CUTLASS_HOST_DEVICE void update( Arguments const &args, void *workspace = nullptr) { ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_B); ptr_D = args.ptr_D; batch_stride_A = args.batch_stride_A; batch_stride_B = args.batch_stride_B; batch_stride_D = args.batch_stride_D; output_op = args.epilogue; semaphore = static_cast<int *>(workspace); } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; public: // // Methods // CUTLASS_DEVICE TrmmUniversal() { } /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size) { static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; if ((problem_size.m() % kAlignmentA) || (problem_size.k() % kAlignmentA) || (problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) || (problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC)) { return Status::kErrorMisalignedOperand; } return Status::kSuccess; } static Status can_implement(Arguments const &args) { return can_implement(args.problem_size); } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } int offset_k = 0; int problem_size_k = params.problem_size.k(); ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // // Fetch pointers based on mode. 
// if (params.mode == GemmUniversalMode::kGemm || params.mode == GemmUniversalMode::kGemmSplitKParallel) { if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; } offset_k = threadblock_tile_offset.k() * params.gemm_k_size; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; } else if (params.mode == GemmUniversalMode::kArray) { ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()]; ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()]; } __syncthreads(); // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, offset_k, }; cutlass::MatrixCoord tb_offset_B{ offset_k, threadblock_tile_offset.n() * Mma::Shape::kN }; // Compute position within threadblock int thread_idx = threadIdx.x; // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; /****************************************************************************************************** First two cases: (Left Side, Lower Fill) and (Right Side, Upper Fill) are transpose of each other - (Left Side, Lower Fill): calculate bottom of the CTA tile, then find the k-iterations needed to process all elements till that coordinate. - (Right Side, Upper Fill): calculate right end of the CTA tile, then find the k-iterations needed to process all elements till that coordinate. Last two cases: (Left Side, Upper Fill) and (Right Side, Lower Fill) are transpose of each other - (Left Side, Upper Fill): calculate the top of the CTA tile, then find k-iterations that can be skipped for all elements of this tile. - (Right Side, Lower Fill): calculate the left start of the CTA tile, then find k-iterations that can be skipped for all elements of this tile. 
********************************************************************************************************/ if (kSideMode == SideMode::kLeft && kFillMode == FillMode::kLower) { int k_iterations_till_diagonal = ((threadblock_tile_offset.m() + 1) * Mma::Shape::kM + Mma::Shape::kK - 1) / Mma::Shape::kK; if (k_iterations_till_diagonal < gemm_k_iterations) { gemm_k_iterations = k_iterations_till_diagonal; } } else if (kSideMode == SideMode::kRight && kFillMode == FillMode::kUpper) { int k_iterations_till_diagonal = ((threadblock_tile_offset.n() + 1) * Mma::Shape::kN + Mma::Shape::kK - 1) / Mma::Shape::kK; if (k_iterations_till_diagonal < gemm_k_iterations) { gemm_k_iterations = k_iterations_till_diagonal; } } else if (kSideMode == SideMode::kLeft && kFillMode == FillMode::kUpper) { int k_iterations_till_diagonal = ((threadblock_tile_offset.m()) * Mma::Shape::kM) / Mma::Shape::kK; if (k_iterations_till_diagonal != 0) { tb_offset_A += cutlass::MatrixCoord({0, k_iterations_till_diagonal * Mma::Shape::kK}); tb_offset_B += cutlass::MatrixCoord({k_iterations_till_diagonal * Mma::Shape::kK, 0}); gemm_k_iterations -= k_iterations_till_diagonal; } } else if (kSideMode == SideMode::kRight && kFillMode == FillMode::kLower) { int k_iterations_till_diagonal = ((threadblock_tile_offset.n()) * Mma::Shape::kN) / Mma::Shape::kK; if (k_iterations_till_diagonal != 0) { tb_offset_A += cutlass::MatrixCoord({0, k_iterations_till_diagonal * Mma::Shape::kK}); tb_offset_B += cutlass::MatrixCoord({k_iterations_till_diagonal * Mma::Shape::kK, 0}); gemm_k_iterations -= k_iterations_till_diagonal; } } // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.params_A, ptr_A, {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( params.params_B, ptr_B, {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B); // Compute threadblock-scoped matrix multiply-add mma( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.n() * Mma::Shape::kN ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); // // Fetch pointers based on mode. // // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); if (params.mode == GemmUniversalMode::kGemm) { // If performing a reduction via split-K, fetch the initial synchronization if (params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. 
semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } } else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kArray) { ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()]; } // Tile iterator loading from source tensor (although irrelevant to this kernel as beta is zero). typename Epilogue::OutputTileIterator iterator_C( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset ); Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_offset.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_offset.k()); __threadfence(); } // Execute the epilogue operator to update the destination tensor. epilogue( output_op, iterator_D, accumulators, iterator_C); // // Release the semaphore // if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/gemm/kernel/trmm_universal.h/0
{ "file_path": "include/cutlass/gemm/kernel/trmm_universal.h", "repo_id": "include", "token_count": 7375 }
30
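The block comment and the four branches in the TRMM kernel above trim the k-loop so that each CTA only visits the k-iterations overlapping the triangular operand: (Left, Lower) and (Right, Upper) tiles stop at the diagonal, while (Left, Upper) and (Right, Lower) tiles start at it. The small host-side helper below (illustrative, not part of CUTLASS) restates that arithmetic so it can be checked in isolation; the split-K k-offset is ignored for brevity.

#include <algorithm>
#include <cstdio>

namespace sketch {

enum class SideMode { kLeft, kRight };
enum class FillMode { kLower, kUpper };

struct KRange { int first_iteration; int iteration_count; };

inline KRange trmm_k_range(SideMode side, FillMode fill,
                           int m_tile, int n_tile,   // CTA tile offsets in M and N
                           int kM, int kN, int kK,   // threadblock tile shape
                           int problem_k) {
  int gemm_k_iterations = (problem_k + kK - 1) / kK;
  int first = 0;
  int count = gemm_k_iterations;

  if (side == SideMode::kLeft && fill == FillMode::kLower) {
    // Stop at the diagonal: k-iterations up to this tile's bottom row.
    count = std::min(count, ((m_tile + 1) * kM + kK - 1) / kK);
  } else if (side == SideMode::kRight && fill == FillMode::kUpper) {
    count = std::min(count, ((n_tile + 1) * kN + kK - 1) / kK);
  } else if (side == SideMode::kLeft && fill == FillMode::kUpper) {
    // Start at the diagonal: skip k-iterations strictly above this tile's top row.
    first = (m_tile * kM) / kK;
    count -= first;
  } else {  // kRight && kLower
    first = (n_tile * kN) / kK;
    count -= first;
  }
  return { first, count };
}

} // namespace sketch

int main() {
  using namespace sketch;
  // 128x128x32 tiles, K = 1024: CTA row 2 of a left/lower TRMM needs only
  // ceil(3*128 / 32) = 12 of the 32 k-iterations.
  KRange r = trmm_k_range(SideMode::kLeft, FillMode::kLower, 2, 0, 128, 128, 32, 1024);
  std::printf("first=%d count=%d\n", r.first_iteration, r.iteration_count);  // first=0 count=12
  return 0;
}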
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data layout of the global memory fragments, data types, and internal tile sizes. Partial specializations for threadblock::Mma operations targeting TensorOp instructions. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/fast_math.h" #include "cutlass/arch/wmma.h" #if defined(CUTLASS_ARCH_WMMA_ENABLED) #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" #include "cutlass/gemm/warp/mma_tensor_op_wmma.h" #include "cutlass/gemm/warp/mma_tensor_op_policy.h" #include "cutlass/gemm/threadblock/default_mma_core.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: row-major /// Operator: wmma tensor op class /// /// This uses the default warp-level operator given tile sizes template < ///< Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_, /// Number of stages int Stages> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassWmmaTensorOp, Stages, Operator_> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassWmmaTensorOp; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassWmmaTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // // Shared memory layouts // // NOTE: shared memory layout for wmma is same as the operands' layout in the global memory using SmemLayoutA = LayoutA; using SmemLayoutB = LayoutB; // Pad shared memory to avoid bank conflicts static int const kPaddingA = 128 / sizeof_bits<ElementA>::value; static int const kPaddingB = 128 / sizeof_bits<ElementB>::value; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, kAccessSizeInBits / sizeof_bits<ElementB>::value >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA >; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, kAccessSizeInBits / sizeof_bits<ElementB>::value >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Wmma< InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Operator >, cutlass::MatrixShape<1, 1> >; using MmaTensorOp = cutlass::gemm::warp::MmaTensorOpWmma< WarpShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Policy >; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaTensorOp, MatrixShape<kPaddingA, 0>, MatrixShape<0, kPaddingB>, WarpCount::kK >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: column-major /// Operator: wmma tensorop class /// /// This uses the default warp-level operator given tile sizes template < ///< Shape of threadblock-scoped matrix multiply operator ///< (concept:GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) [allowed /// wmma instruction shapes, e.g., 16x16x16, 32x8x16, 8x32x16,...] 
typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by GEMM typename Operator_, /// Number of stages int Stages> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::RowMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassWmmaTensorOp, Stages, Operator_> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassWmmaTensorOp; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." ); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassWmmaTensorOp>::value; /// Number of threads per threadblock static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; static int const kWarpThreadArrangementContiguousB = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // // shared memory layout for wmma is same as the operands' layout in global memory using SmemLayoutA = LayoutA; using SmemLayoutB = LayoutB; // Pad shared memory to avoid bank conflicts static int const kPaddingA = 128 / sizeof_bits<ElementA>::value; static int const kPaddingB = 128 / sizeof_bits<ElementB>::value; // // Iterators to write to shared memory // using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, kAccessSizeInBits / sizeof_bits<ElementA>::value >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA >; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, kAccessSizeInBits / sizeof_bits<ElementB>::value >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB // SmemThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Wmma< InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Operator >, cutlass::MatrixShape<1, 1> >; using MmaTensorOp = cutlass::gemm::warp::MmaTensorOpWmma< WarpShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Policy >; /// Policy used to define 
MmaPipelined using MmaPolicy = MmaPolicy< MmaTensorOp, MatrixShape<0, kPaddingA>, MatrixShape<kPaddingB, 0>, WarpCount::kK >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: row-major /// B: row-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by MMA typename Operator_, /// Number of stages int Stages> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::RowMajor, ElementB_, layout::RowMajor, ElementC_, LayoutC_, arch::OpClassWmmaTensorOp, Stages, Operator_> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::RowMajor; using ElementB = ElementB_; using LayoutB = layout::RowMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassWmmaTensorOp; /// Number of warps present using WarpCount = GemmShape< Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK >; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size." 
); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassWmmaTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousA = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedA = kWarpSize / kWarpThreadArrangementContiguousA; // // Shared memory layouts // // shared memory layout for wmma is same as the operands' layout in global memory using SmemLayoutA = LayoutA; using SmemLayoutB = LayoutB; // Pad shared memory to avoid bank conflicts static int const kPaddingA = 128 / sizeof_bits<ElementA>::value; static int const kPaddingB = 128 / sizeof_bits<ElementB>::value; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kM>, kThreads, kAccessSizeInBits / sizeof_bits<ElementA>::value >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA >; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kN, Shape::kK>, kThreads, kAccessSizeInBits / sizeof_bits<ElementB>::value >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB >; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Wmma< InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Operator >, cutlass::MatrixShape<1, 1> >; using MmaTensorOp = cutlass::gemm::warp::MmaTensorOpWmma< WarpShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Policy >; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaTensorOp, MatrixShape<0, kPaddingA>, MatrixShape<0, kPaddingB>, WarpCount::kK >; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization: /// /// A: column-major /// B: column-major /// Operator: tensor op class /// /// This uses the default warp-level operator given tile sizes template < /// Shape of threadblock-scoped matrix multiply operator (concept: /// GemmShape) typename Shape_, /// Shape of warp-level matrix multiply operator (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A operand typename ElementA_, /// Data type of B operand typename ElementB_, /// Data type of accumulator typename ElementC_, /// Layout of accumulator typename LayoutC_, /// Operation performed by MMA typename Operator_, /// Number of stages int Stages> struct DefaultMmaCore<Shape_, WarpShape_, InstructionShape_, ElementA_, layout::ColumnMajor, ElementB_, layout::ColumnMajor, ElementC_, LayoutC_, arch::OpClassWmmaTensorOp, Stages, Operator_> { using Shape = Shape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using ElementA = ElementA_; using LayoutA = layout::ColumnMajor; using ElementB = ElementB_; using LayoutB = 
layout::ColumnMajor; using ElementC = ElementC_; using LayoutC = LayoutC_; using OperatorClass = arch::OpClassWmmaTensorOp; /// Number of warps present using WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>; // Divisility requirements static_assert( !(Shape::kM % WarpShape::kM) && !(Shape::kN % WarpShape::kN), "Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."); /// Number of threads per warp static int const kWarpSize = warp::WarpSize<arch::OpClassWmmaTensorOp>::value; /// Number of threads total static int const kThreads = WarpCount::kCount * kWarpSize; /// Size of a threadblock-scoped access static int const kAccessSizeInBits = 128; /// Default Operator using Operator = Operator_; // Warp thread arrangement static int const kWarpThreadArrangementContiguousB = Shape::kK / (kAccessSizeInBits / sizeof_bits<ElementA>::value); static int const kWarpThreadArrangementStridedB = kWarpSize / kWarpThreadArrangementContiguousB; // // Shared memory layouts // // shared memory layout for wmma is same as the operands' layout in global memory using SmemLayoutA = LayoutA; using SmemLayoutB = LayoutB; // Pad shared memory to avoid bank conflicts static int const kPaddingA = 128 / sizeof_bits<ElementA>::value; static int const kPaddingB = 128 / sizeof_bits<ElementB>::value; // // Iterators to write to shared memory // /// ThreadMap of iterator A using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kM, Shape::kK>, kThreads, kAccessSizeInBits / sizeof_bits<ElementA>::value >; /// Shared memory iterator to A operand using SmemIteratorA = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kM, Shape::kK>, ElementA, SmemLayoutA, 1, IteratorThreadMapA>; /// ThreadMap of iterator B using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap< layout::PitchLinearShape<Shape::kK, Shape::kN>, kThreads, kAccessSizeInBits / sizeof_bits<ElementB>::value >; /// Shared memory iterator to B operand using SmemIteratorB = transform::threadblock::RegularTileIterator< MatrixShape<Shape::kK, Shape::kN>, ElementB, SmemLayoutB, 0, IteratorThreadMapB>; // // Warp-level matrix multiply operator // // Define the warp-level tensor op using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Wmma< InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Operator >, cutlass::MatrixShape<1, 1> >; using MmaTensorOp = cutlass::gemm::warp::MmaTensorOpWmma< WarpShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Policy >; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy< MmaTensorOp, MatrixShape<kPaddingA, 0>, MatrixShape<kPaddingB, 0>, WarpCount::kK >; }; } // namespace threadblock } // namespace gemm } // namespace cutlass #endif // defined(CUTLASS_ARCH_WMMA_ENABLED)
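A quick way to sanity-check the compile-time arithmetic performed by the WMMA DefaultMmaCore specializations above (warp count, thread count, and shared-memory padding) is to reproduce it with plain constants. The sketch below is editorial and not part of CUTLASS; the 128x128x32 threadblock tile, 64x64x32 warp tile, and 16-bit (half-precision) operands are illustrative assumptions only.

#include <cstdio>

int main() {
  // Example threadblock and warp tile extents (M, N, K): assumed values, not fixed by the header.
  constexpr int kShapeM = 128, kShapeN = 128, kShapeK = 32;
  constexpr int kWarpM  = 64,  kWarpN  = 64,  kWarpK  = 32;

  // WarpCount = GemmShape<Shape::kM / WarpShape::kM, Shape::kN / WarpShape::kN, Shape::kK / WarpShape::kK>
  constexpr int kWarpCountM = kShapeM / kWarpM;   // 2
  constexpr int kWarpCountN = kShapeN / kWarpN;   // 2
  constexpr int kWarpCountK = kShapeK / kWarpK;   // 1

  // kThreads = WarpCount::kCount * kWarpSize
  constexpr int kWarpSize = 32;
  constexpr int kThreads  = kWarpCountM * kWarpCountN * kWarpCountK * kWarpSize;   // 128

  // kPaddingA / kPaddingB = 128 / sizeof_bits<Element>::value:
  // one 128-bit access worth of elements appended per shared-memory row to avoid bank conflicts.
  constexpr int kElementBits = 16;                  // e.g. half_t
  constexpr int kPadding     = 128 / kElementBits;  // 8 elements

  std::printf("WarpCount = {%d, %d, %d}, threads = %d, smem padding = %d elements\n",
              kWarpCountM, kWarpCountN, kWarpCountK, kThreads, kPadding);
  return 0;
}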
include/cutlass/gemm/threadblock/default_mma_core_wmma.h
{ "file_path": "include/cutlass/gemm/threadblock/default_mma_core_wmma.h", "repo_id": "include", "token_count": 7246 }
31
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default warp-level GEMM operators selected by data type, size, and layouts of operands. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/warp/mma_sparse_tensor_op.h" namespace cutlass { namespace gemm { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Operator describing the tensor operation typename Operator_ = arch::OpMultiplyAdd, /// Number of partitions along K dimension int PartitionsK = 1, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. 
bool AccumulatorsInRowMajor = false > struct DefaultSparseMmaTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial Specialization - inputs and output types are float - uses TF32 internally template < /// Shape of one matrix production operation (concept: GemmShape) typename WarpShape_, /// Shape of target matrix multiply instruction (concept: GemmShape) typename InstructionShape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Number of partitions along K dimension int PartitionsK, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor> struct DefaultSparseMmaTensorOp< WarpShape_, InstructionShape_, float, LayoutA, float, LayoutB, float, LayoutC, arch::OpMultiplyAdd, PartitionsK, AccumulatorsInRowMajor> { // Uses TF32 internally using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::SparseMma< InstructionShape_, 32, tfloat32_t, cutlass::layout::RowMajor, tfloat32_t, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::SparseMmaTensorOp< WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC, Policy, PartitionsK, AccumulatorsInRowMajor>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for m-by-n-by-kgroup template < /// Shape of one matrix production operation (concept: GemmShape) typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A elements typename ElementA, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Data type of B elements typename ElementB, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Element type of C matrix typename ElementC, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Operator describing the tensor operation typename Operator_, /// Number of partitions along K dimension int PartitionsK, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor> struct DefaultSparseMmaTensorOp { using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::SparseMma<InstructionShape_, 32, ElementA, cutlass::layout::RowMajor, ElementB, cutlass::layout::ColumnMajor, ElementC, cutlass::layout::RowMajor, Operator_>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::SparseMmaTensorOp< WarpShape_, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Policy, PartitionsK, AccumulatorsInRowMajor>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
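The two DefaultSparseMmaTensorOp specializations above implement a common CUTLASS pattern: the generic specialization forwards the element types straight into arch::SparseMma, while the float specialization substitutes tfloat32_t as the internal compute type. The standalone sketch below mirrors only that selection pattern; SelectSparseCompute, tf32_tag, and RowMajor are illustrative stand-ins, not CUTLASS types.

#include <type_traits>

struct tf32_tag {};   // stand-in for cutlass::tfloat32_t
struct RowMajor {};   // stand-in layout type, kept only so the float case stays a partial specialization

// Primary template: the compute type is the storage element type itself.
template <typename Element, typename Layout>
struct SelectSparseCompute {
  using type = Element;
};

// Partial specialization: float operands are computed with TF32 internally.
template <typename Layout>
struct SelectSparseCompute<float, Layout> {
  using type = tf32_tag;
};

static_assert(std::is_same<SelectSparseCompute<signed char, RowMajor>::type, signed char>::value,
              "non-float elements pass through unchanged");
static_assert(std::is_same<SelectSparseCompute<float, RowMajor>::type, tf32_tag>::value,
              "float is redirected to the TF32 compute path");

int main() { return 0; }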
include/cutlass/gemm/warp/default_mma_sparse_tensor_op.h
{ "file_path": "include/cutlass/gemm/warp/default_mma_sparse_tensor_op.h", "repo_id": "include", "token_count": 2027 }
32
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Describes the lane policy used by warp-level matrix multiply operators targeting SIMT instructions */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/tensor_ref.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma_simt_policy.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Iterates over operands to warp-level matrix multiply operations targeting SIMT instructions /// /// concept: MutableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Operand identity Operand Operand, /// Data type of A elements typename Element_, /// Layout of operand typename Layout_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Number of partitions along K dimension - used in sliced-K int PartitionsK = 1, /// Group Size along kPartition - used in sliced-K int PartitionGroupSize = 1 > class MmaSimtTileIterator; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization for A operands of column-major layouts /// /// Concept: MutableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of A elements typename Element_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Number of partitions along K dimension - used in sliced-K int PartitionsK, /// Group Size along kPartition - used in sliced-K int PartitionGroupSize > class MmaSimtTileIterator<Shape_, Operand::kA, Element_, layout::ColumnMajor, Policy_, PartitionsK, PartitionGroupSize> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kA; /// Element type using Element = Element_; /// Layout of policy using Layout = layout::ColumnMajor; /// Decomposition of elements among threads using Policy = Policy_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; // // Derived quantities // static_assert(!(Shape::kRow % Policy::WarpShape::kRow), "The warp-level GEMM M size must be divisible by the number of threads arranged along the M dimension."); static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero."); static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero."); /// Thread-level shape of a fragment using ThreadShape = MatrixShape< Shape::kRow / Policy::WarpShape::kRow, Shape::kColumn >; static_assert(!(ThreadShape::kRow % Policy::LaneMmaShape::kM), "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); /// Number of individual loads using Iterations = MatrixShape< ThreadShape::kRow / 
Policy::LaneMmaShape::kM, ThreadShape::kColumn >; /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, ThreadShape::kCount>; private: /// Internal reference cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kM>, layout::ColumnMajor> ref_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaSimtTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaSimtTileIterator( TensorRef ref, int lane_id ) { // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id) * MatrixCoord(Policy::LaneMmaShape::kM, 0); ref.add_coord_offset(lane_offset); ref_.reset( reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> *>(ref.data()), ref.stride(0) / Policy::LaneMmaShape::kM); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { ref_.add_coord_offset({ coord.row() * Shape::kRow / Policy::LaneMmaShape::kM, coord.column() * Shape::kColumn}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator++() { ref_.add_coord_offset({0, Shape::kColumn}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator--() { ref_.add_coord_offset({0, -Shape::kColumn}); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. (vector loads) CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { Array<Element, Policy::LaneMmaShape::kM> *dst_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kColumn; ++k) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kRow; ++m) { // This logic has been replaced with calls to inline PTX to guarantee vectorization. #if 0 dst_ptr[m + k * Iterations::kRow] = *(ref_.data() + ref_.offset({m * Policy::WarpShape::kRow, k}) + pointer_offset / Policy::LaneMmaShape::kM); #endif auto ptr = ref_.data() + ref_.offset({m * Policy::WarpShape::kRow, k}) + pointer_offset / Policy::LaneMmaShape::kM; arch::shared_load(dst_ptr[m + k * Iterations::kRow], ptr); } } } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { Array<Element, Policy::LaneMmaShape::kM> const *src_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kN; ++k) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kM; ++m) { *(ref_.data() + ref_.offset(m * Policy::WarpShape::kM, k) + pointer_offset / Policy::LaneMmaShape::kM) = src_ptr[m + k * Iterations::kM]; } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization for A operands of row-major layouts /// /// Concept: MutableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of A elements typename Element_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Number of partitions along K dimension - used in sliced-K int PartitionsK, /// Group Size along kPartition - used in sliced-K int PartitionGroupSize > class MmaSimtTileIterator<Shape_, Operand::kA, Element_, layout::RowMajor, Policy_, PartitionsK, PartitionGroupSize> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kA; /// Element type using Element = Element_; /// Layout of policy using Layout = layout::RowMajor; /// Decomposition of elements among threads using Policy = Policy_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; // // Derived quantities // static_assert(!(Shape::kRow % Policy::WarpShape::kRow), "The warp-level GEMM M size must be divisible by the number of threads arranged along the M dimension."); static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero."); static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero."); /// Thread-level shape of a fragment using ThreadShape = MatrixShape< Shape::kRow / Policy::WarpShape::kRow, Shape::kColumn >; static_assert(!(ThreadShape::kRow % Policy::LaneMmaShape::kM), "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); /// Number of individual loads (scalar loads) using Iterations = MatrixShape< ThreadShape::kRow / Policy::LaneMmaShape::kM, ThreadShape::kColumn 
>; /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, ThreadShape::kCount>; private: /// Internal reference cutlass::TensorRef<Element, layout::RowMajor> ref_; /// Extent of tensor MatrixCoord extent_; /// Origin MatrixCoord origin_; /// Used to conditionally enable extents checking bool divisible_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaSimtTileIterator() : divisible_(true) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaSimtTileIterator( TensorRef ref, int lane_id ) : extent_(Shape::kRow, Shape::kColumn), divisible_ (true) { // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id) * MatrixCoord(Policy::LaneMmaShape::kM, 0); origin_ = lane_offset; ref.add_coord_offset(lane_offset); ref_.reset(ref.data(), ref.stride(0)); } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaSimtTileIterator( TensorRef ref, TensorCoord extent, int lane_id ) : extent_(extent), divisible_ (false) { // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id) * MatrixCoord(Policy::LaneMmaShape::kM, 0); origin_ = lane_offset; ref.add_coord_offset(lane_offset); ref_.reset(ref.data(), ref.stride(0)); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { TensorCoord coord_offset( coord.row() * Shape::kRow, coord.column() * Shape::kColumn); origin_ += coord_offset; ref_.add_coord_offset(coord_offset); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator++() { ref_.add_coord_offset({0, Shape::kColumn}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator--() { ref_.add_coord_offset({0, -Shape::kColumn}); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. (scalar loads) CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kColumn; ++k) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kRow; ++m) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Policy::LaneMmaShape::kM; i++) { MatrixCoord offset(m * Policy::WarpShape::kRow * Policy::LaneMmaShape::kM + i, k); MatrixCoord access_coord = origin_ + offset; int frag_idx = m * Policy::LaneMmaShape::kM + i + k * Iterations::kRow; if (divisible_ || (access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) { frag[frag_idx] = *(ref_.data() + ref_.offset(offset) + pointer_offset); } else { frag[frag_idx] = Element(); } } } } } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kColumn; ++k) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kRow; ++m) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Policy::LaneMmaShape::kM; i++) { *(ref_.data() + ref_.offset(m * Policy::WarpShape::kM * Policy::LaneMmaShape::kM + i, k) + pointer_offset) = frag[m * Policy::LaneMmaShape::kM + i + k * Iterations::kM]; } } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization for B operands of row-major layouts /// /// Concept: MutableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of A elements typename Element_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Number of partitions along K dimension int PartitionsK, /// Group Size along kPartition - used in sliced-K int PartitionGroupSize > class MmaSimtTileIterator<Shape_, Operand::kB, Element_, layout::RowMajor, Policy_, PartitionsK, PartitionGroupSize> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kB; /// Element type using Element = Element_; /// Layout of policy using Layout = layout::RowMajor; /// Decomposition of elements among threads using Policy = Policy_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; // // Derived quantities // static_assert(!(Shape::kColumn % Policy::WarpShape::kColumn), "The warp-level GEMM N size must be divisible by the number of threads arranged along the N dimension."); static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero."); static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero."); /// Thread-level shape of a fragment using ThreadShape = MatrixShape< Shape::kRow, Shape::kColumn / Policy::WarpShape::kColumn >; static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN), "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); /// Number of individual loads using Iterations = MatrixShape< ThreadShape::kRow, ThreadShape::kColumn / Policy::LaneMmaShape::kN >; /// 
Fragment object holding a thread's part of a tile using Fragment = Array<Element, ThreadShape::kCount>; protected: /// Internal reference cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kN>, layout::RowMajor> ref_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaSimtTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaSimtTileIterator( TensorRef ref, int lane_id ) { // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN); ref.add_coord_offset(lane_offset); ref_.reset( reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(ref.data()), ref.stride(0) / Policy::LaneMmaShape::kN); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { ref_.add_coord_offset({ coord.row() * Shape::kRow, coord.column() * Shape::kColumn / Policy::LaneMmaShape::kN}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator++() { ref_.add_coord_offset({Shape::kRow, 0}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator--() { ref_.add_coord_offset({-Shape::kRow, 0}); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. (vector loads) CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { Array<Element, Policy::LaneMmaShape::kN> *dst_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kRow; ++k) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { #if 0 dst_ptr[n + k * Iterations::kColumn] = *(ref_.data() + ref_.offset({k, n * Policy::WarpShape::kColumn}) + pointer_offset / Policy::LaneMmaShape::kN); #endif void const *ptr = ref_.data() + ref_.offset({k, n * Policy::WarpShape::kColumn}) + pointer_offset / Policy::LaneMmaShape::kN; arch::shared_load(dst_ptr[n + k * Iterations::kColumn], ptr); } } } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { Array<Element, Policy::LaneMmaShape::kN> const *src_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kM; ++k) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kN; ++n) { *(ref_.data() + ref_.offset({k, n * Policy::WarpShape::kN}) + pointer_offset / Policy::LaneMmaShape::kN) = src_ptr[n + k * Iterations::kN]; } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag, Index pointer_offset) const { store_with_pointer_offset(frag, 0); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. 
Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization for B operands of column-major layouts /// /// Concept: MutableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of A elements typename Element_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Number of partitions along K dimension int PartitionsK, /// Group Size along kPartition - used in sliced-K int PartitionGroupSize > class MmaSimtTileIterator<Shape_, Operand::kB, Element_, layout::ColumnMajor, Policy_, PartitionsK, PartitionGroupSize> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kB; /// Element type using Element = Element_; /// Layout of policy using Layout = layout::ColumnMajor; /// Decomposition of elements among threads using Policy = Policy_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; // // Derived quantities // static_assert(!(Shape::kColumn % Policy::WarpShape::kColumn), "The warp-level GEMM N size must be divisible by the number of threads arranged along the N dimension."); static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero."); static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero."); /// Thread-level shape of a fragment using ThreadShape = MatrixShape< Shape::kRow, Shape::kColumn / Policy::WarpShape::kColumn >; static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN), "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); /// Number of individual loads using Iterations = MatrixShape< ThreadShape::kRow, ThreadShape::kColumn / Policy::LaneMmaShape::kN >; /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, ThreadShape::kCount>; private: /// Internal reference cutlass::TensorRef<Element, layout::ColumnMajor> ref_; /// Extent of tensor MatrixCoord extent_; /// Origin MatrixCoord origin_; /// Used to conditionally enable extents checking bool divisible_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaSimtTileIterator(): divisible_(true) { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaSimtTileIterator( TensorRef ref, int lane_id ): extent_(Shape::kRow, Shape::kColumn), divisible_(true) { // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN); origin_ = lane_offset; ref.add_coord_offset(lane_offset); ref_.reset(ref.data(), ref.stride(0)); } /// Constructor 
from TensorRef CUTLASS_HOST_DEVICE MmaSimtTileIterator( TensorRef ref, TensorCoord extent, int lane_id ): extent_(extent), divisible_(false) { // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN); origin_ = lane_offset; ref.add_coord_offset(lane_offset); ref_.reset(ref.data(), ref.stride(0)); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { TensorCoord coord_offset( coord.row() * Shape::kRow, coord.column() * Shape::kColumn); origin_ += coord_offset; ref_.add_coord_offset(coord_offset); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator++() { ref_.add_coord_offset({Shape::kRow, 0}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator--() { ref_.add_coord_offset({-Shape::kRow, 0}); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. (scalar loads) CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kRow; ++k) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Policy::LaneMmaShape::kN; ++i) { MatrixCoord offset(k, n * Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN + i); MatrixCoord access_coord = origin_ + offset; int frag_idx = n * Policy::LaneMmaShape::kN + i + k * Iterations::kColumn; if (divisible_ || (access_coord.row() < extent_.row() && access_coord.column() < extent_.column())) { frag[frag_idx] = *(ref_.data() + ref_.offset(offset) + pointer_offset); } else { frag[frag_idx] = Element(); } } } } } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { Array<Element, Policy::LaneMmaShape::kN> const *src_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kM; ++k) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kN; ++n) { *(ref_.data() + ref_.offset({k, n * Policy::WarpShape::kN}) + pointer_offset / Policy::LaneMmaShape::kN) = src_ptr[n + k * Iterations::kN]; } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag, Index pointer_offset) const { store_with_pointer_offset(frag, 0); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. 
CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization for C operands of column-major layouts /// /// Concept: MutableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of A elements typename Element_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_ > class MmaSimtTileIterator<Shape_, Operand::kC, Element_, layout::ColumnMajor, Policy_> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kC; /// Element type using Element = Element_; /// Layout of accumulators in memory using Layout = layout::ColumnMajor; /// Decomposition of elements among threads using Policy = Policy_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; // // Derived quantities // static_assert( (!(Shape::kRow % Policy::WarpShape::kRow)) && (!(Shape::kColumn % Policy::WarpShape::kColumn)), "Warp-level GEMM shape must be divisible by the arrangement of threads in the warp."); static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero."); static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero."); static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero."); static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero."); /// Thraed-level shape of a fragment using ThreadShape = MatrixShape< Shape::kRow / Policy::WarpShape::kRow, Shape::kColumn / Policy::WarpShape::kColumn >; static_assert( (!(ThreadShape::kRow % Policy::LaneMmaShape::kM)) && (!(ThreadShape::kColumn % Policy::LaneMmaShape::kN)), "Warp-level GEMM shape must be divisible by the arrangement of threads in the warp."); /// Number of individual loads using Iterations = MatrixShape< ThreadShape::kRow / Policy::LaneMmaShape::kM, ThreadShape::kColumn / Policy::LaneMmaShape::kN >; using Delta = MatrixShape< Policy::WarpShape::kRow * Policy::LaneMmaShape::kM, Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN >; /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, ThreadShape::kCount>; private: TensorRef ref_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaSimtTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaSimtTileIterator( TensorRef const &ref, int lane_id ): ref_(ref) { // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id) * MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN); ref_.add_coord_offset(lane_offset); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { 
ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { ref_.add_coord_offset({ coord.row() * Shape::kRow, coord.column() * Shape::kColumn}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator++() { ref_.add_coord_offset({Shape::kRow, 0}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator--() { ref_.add_coord_offset({-Shape::kRow, 0}); return *this; } /// Loads a fragment from memory with additional logical offset CUTLASS_HOST_DEVICE void load_with_pointer_offset( Fragment &frag, ///< fragment to be loaded from memory Index pointer_offset) const { ///< linear offset (in units of Element) when loading CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Iterations::kN; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) { Array<Element, Policy::LaneMmaShape::kM> const *src_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> const *>( ref_.data() + pointer_offset + ref_.offset({0, mma_n * Delta::kN + n})); CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Iterations::kM; ++mma_m) { Array<Element, Policy::LaneMmaShape::kM> *dst_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> *>(&frag) + mma_m + Iterations::kM * (n + mma_n * Policy::LaneMmaShape::kN); *dst_ptr = src_ptr[mma_m * Policy::WarpShape::kM]; } } } } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) { Array<Element, Policy::LaneMmaShape::kM> *dst_ptr= reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> *>( ref_.data() + pointer_offset + ref_.offset({0, mma_n * Delta::kColumn + n})); CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) { Array<Element, Policy::LaneMmaShape::kM> const *src_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kM> const *>(&frag) + mma_m + Iterations::kRow * (n + mma_n * Policy::LaneMmaShape::kN); dst_ptr[mma_m * Policy::WarpShape::kRow] = *src_ptr; } } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization for C operands of row-major layouts /// /// Concept: MutableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of A elements typename Element_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_ > class MmaSimtTileIterator<Shape_, Operand::kC, Element_, layout::RowMajor, Policy_> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kC; /// Element type using Element = Element_; /// Layout of accumulators in memory using Layout = 
layout::RowMajor; /// Decomposition of elements among threads using Policy = Policy_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; // // Derived quantities // static_assert( (!(Shape::kRow % Policy::WarpShape::kRow)) && (!(Shape::kColumn % Policy::WarpShape::kColumn)), "Warp-level GEMM shape must be divisible by the arrangement of threads in the warp."); static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero."); static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero."); static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero."); static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero."); /// Thraed-level shape of a fragment using ThreadShape = MatrixShape< Shape::kRow / Policy::WarpShape::kRow, Shape::kColumn / Policy::WarpShape::kColumn >; static_assert( (!(ThreadShape::kRow % Policy::LaneMmaShape::kM)) && (!(ThreadShape::kColumn % Policy::LaneMmaShape::kN)), "Warp-level GEMM shape must be divisible by the arrangement of threads in the warp."); /// Number of individual loads using Iterations = MatrixShape< ThreadShape::kRow / Policy::LaneMmaShape::kM, ThreadShape::kColumn / Policy::LaneMmaShape::kN >; using Delta = MatrixShape< Policy::WarpShape::kRow * Policy::LaneMmaShape::kM, Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN >; /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, ThreadShape::kCount>; private: TensorRef ref_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaSimtTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaSimtTileIterator( TensorRef const &ref, int lane_id ): ref_(ref) { // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id) * MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN); ref_.add_coord_offset(lane_offset); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { ref_.add_coord_offset({ coord.row() * Shape::kRow, coord.column() * Shape::kColumn}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator++() { ref_.add_coord_offset({Shape::kRow, 0}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator--() { ref_.add_coord_offset({-Shape::kRow, 0}); return *this; } /// Loads a fragment from memory with additional logical offset CUTLASS_HOST_DEVICE void load_with_pointer_offset( Fragment &frag, ///< fragment to be loaded from memory Index pointer_offset) 
const { ///< linear offset (in units of Element) when loading CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) { Array<Element, Policy::LaneMmaShape::kN> const *src_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> const *>( ref_.data() + pointer_offset + ref_.offset({mma_m * Delta::kRow + m, 0})); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) { Array<Element, Policy::LaneMmaShape::kN> *dst_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag) + mma_n + Iterations::kColumn * (m + mma_m * Policy::LaneMmaShape::kM); *dst_ptr = src_ptr[mma_n * Policy::WarpShape::kColumn]; } } } } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) { Array<Element, Policy::LaneMmaShape::kN> *dst_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>( ref_.data() + pointer_offset + ref_.offset({mma_m * Delta::kRow + m, 0})); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) { Array<Element, Policy::LaneMmaShape::kN> const *src_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> const *>(&frag) + mma_n + Iterations::kColumn * (m + mma_m * Policy::LaneMmaShape::kM); dst_ptr[mma_n * Policy::WarpShape::kColumn] = *src_ptr; } } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization for A operands of column-major-K interleaved layouts /// /// Concept: MutableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of A elements typename Element_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Number of partitions along K dimension int PartitionsK, /// Number of KGroups per kPartition int PartitionGroupSize > class MmaSimtTileIterator<Shape_, Operand::kA, Element_, layout::ColumnMajorInterleaved<4>, Policy_, PartitionsK, PartitionGroupSize> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kA; /// Element type using Element = Element_; /// Layout of policy using Layout = layout::ColumnMajorInterleaved<4> ; /// Decomposition of elements among threads using Policy = Policy_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Iterleave factor static const int kInterleave = 4; /// Number of partitions along K dimension static const int kPartitionsK = 
PartitionsK; /// Number of KGroups per kPartition static const int kGroupPerTile = PartitionGroupSize / Shape::kColumn; // // Derived quantities // static_assert(!(Shape::kRow % Policy::WarpShape::kRow), "The warp-level GEMM M size must be divisible by the number of threads arranged along the M dimension."); static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero."); static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero."); /// Thread-level shape of a fragment using ThreadShape = MatrixShape< Shape::kRow / Policy::WarpShape::kRow, Shape::kColumn >; static_assert(!(ThreadShape::kRow % Policy::LaneMmaShape::kM) && !(ThreadShape::kColumn % Policy::LaneMmaShape::kK), "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); /// Number of individual loads using Iterations = MatrixShape< ThreadShape::kRow / Policy::LaneMmaShape::kM, ThreadShape::kColumn / Policy::LaneMmaShape::kK >; /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, ThreadShape::kCount>; private: /// Internal reference cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kMK>, layout::ColumnMajorInterleaved<4>> ref_; /// group index within tile int k_group_idx_; public: CUTLASS_HOST_DEVICE MmaSimtTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaSimtTileIterator( TensorRef ref, int lane_id ) { // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id) * MatrixCoord(Policy::LaneMmaShape::kM, 0); ref.add_coord_offset(lane_offset); k_group_idx_ = 0; ref_.reset(reinterpret_cast<Array<Element, Policy::LaneMmaShape::kMK> *>(ref.data()), ref.stride(0)/Policy::LaneMmaShape::kMK); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { ref_.add_coord_offset({ coord.row() * Shape::kRow / Policy::LaneMmaShape::kMK, coord.column() * Shape::kColumn}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator++() { add_tile_offset({0, 1}); if (kPartitionsK > 1) { ++k_group_idx_; // Jump to next stage if (k_group_idx_ == kGroupPerTile) { k_group_idx_ = 0; add_tile_offset({0, kGroupPerTile * (kPartitionsK-1)}); } } return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator--() { ref_.add_coord_offset({0, -Shape::kColumn}); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. 
CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { Array<Element, Policy::LaneMmaShape::kMK > *dst_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kMK> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kColumn; ++k) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kRow; ++m) { dst_ptr[m + k * Iterations::kRow] = *((ref_.data() + ref_.offset({m * Policy::WarpShape::kRow / kInterleave, k*Policy::LaneMmaShape::kK}) + pointer_offset / Policy::LaneMmaShape::kM)); } } } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { Array<Element, Policy::LaneMmaShape::kMK> const *src_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kMK > *>(&frag); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kN; ++k) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Iterations::kM; ++m) { *(ref_.data() + ref_.offset(m * Policy::WarpShape::kM, k) + pointer_offset / Policy::LaneMmaShape::kM) = src_ptr[m + k * Iterations::kM]; } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag) const { store_with_pointer_offset(frag, 0); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. 
CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Specialization for B operands of row-major k-interleaved layouts /// /// Concept: MutableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of A elements typename Element_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Number of partitions along K dimension int PartitionsK, /// Number of KGroups per kPartition int PartitionGroupSize > class MmaSimtTileIterator<Shape_, Operand::kB, Element_, layout::RowMajorInterleaved<4>, Policy_, PartitionsK, PartitionGroupSize> { public: /// Shape of tile to load (concept: MatrixShape) using Shape = Shape_; /// Operand tag static Operand const kOperand = Operand::kB; /// Element type using Element = Element_; /// Layout of policy using Layout = layout::RowMajorInterleaved<4>; /// Decomposition of elements among threads using Policy = Policy_; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Interleave factor static const int kInterleave = 4; /// Number of partitions along K dimension static const int kPartitionsK = PartitionsK; /// Number of KGroups per kPartition static const int kGroupPerTile = PartitionGroupSize / Shape::kRow; // // Derived quantities // static_assert(!(Shape::kColumn % Policy::WarpShape::kColumn), "The warp-level GEMM N size must be divisible by the number of threads arranged along the N dimension."); static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero."); static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero."); static_assert(Policy::WarpShape::kColumn > 0, "Policy::WarpShape::kColumn must be greater than zero."); static_assert(Shape::kColumn / Policy::WarpShape::kColumn > 0, "Shape::kColumn / Policy::WarpShape::kColumn must be greater than zero."); /// Thread-level shape of a fragment using ThreadShape = MatrixShape< Shape::kRow, Shape::kColumn / Policy::WarpShape::kColumn >; static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN) && !(ThreadShape::kRow % Policy::LaneMmaShape::kK), "Thread-level GEMM must be divisible by Policy::LaneMmaShape."); /// Number of individual loads using Iterations = MatrixShape< ThreadShape::kRow / Policy::LaneMmaShape::kK, ThreadShape::kColumn / Policy::LaneMmaShape::kN >; /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, ThreadShape::kCount>; private: /// Internal reference cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kKN>, layout::RowMajorInterleaved<4>> ref_; /// group index within tile int k_group_idx_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE MmaSimtTileIterator() { } /// Constructor from TensorRef CUTLASS_HOST_DEVICE MmaSimtTileIterator( TensorRef ref, int lane_id ) { // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN); ref.add_coord_offset(lane_offset); k_group_idx_ = 0; ref_.reset( reinterpret_cast<Array<Element, 
Policy::LaneMmaShape::kKN> *>(ref.data()), ref.stride(0) / Policy::LaneMmaShape::kKN); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_pointer_offset(LongIndex offset) { ref_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_HOST_DEVICE MmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) { ref_.add_coord_offset({ coord.row() * Shape::kRow, coord.column() * Shape::kColumn / Policy::LaneMmaShape::kKN}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator++() { add_tile_offset({1, 0}); if (kPartitionsK > 1) { ++k_group_idx_; // Jump to next stage if (k_group_idx_ == kGroupPerTile) { k_group_idx_ = 0; add_tile_offset({kGroupPerTile * (kPartitionsK-1), 0}); } } return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE MmaSimtTileIterator & operator--() { ref_.add_coord_offset({-Shape::kRow, 0}); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const { Array<Element, Policy::LaneMmaShape::kKN> *dst_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kKN> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kRow; ++k) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kColumn; ++n) { dst_ptr[n + k * Iterations::kColumn] = *(ref_.data() + ref_.offset({k * Policy::LaneMmaShape::kK, n * Policy::WarpShape::kColumn / kInterleave}) + pointer_offset / Policy::LaneMmaShape::kN); } } } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const { Array<Element, Policy::LaneMmaShape::kN> const *src_ptr = reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Iterations::kM; ++k) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Iterations::kN; ++n) { *(ref_.data() + ref_.offset({k, n * Policy::WarpShape::kN}) + pointer_offset / Policy::LaneMmaShape::kN) = src_ptr[n + k * Iterations::kN]; } } } /// Stores a fragment to memory at the location pointed to by the iterator CUTLASS_HOST_DEVICE void store(Fragment const &frag, Index pointer_offset) const { store_with_pointer_offset(frag, 0); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { // no operation here } }; /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass
// Source: include/cutlass/gemm/warp/mma_simt_tile_iterator.h
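// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the library): the k-group bookkeeping in
// the interleaved iterators' operator++ above can be hard to follow. This
// standalone host program reproduces only that arithmetic, for hypothetical
// values kGroupPerTile = 4 and kPartitionsK = 2, and prints the tile-column
// offset such an iterator would visit. All names are local to the sketch.
// ---------------------------------------------------------------------------
#include <cstdio>

int main() {
  int const kGroupPerTile = 4;   // assumed: KGroups per partition
  int const kPartitionsK  = 2;   // assumed: partitions along K
  int k_group_idx = 0;
  int tile_column = 0;           // stands in for the column passed to add_tile_offset

  for (int step = 0; step < 12; ++step) {
    printf("step %2d -> tile column %d (k_group %d)\n", step, tile_column, k_group_idx);

    // operator++: always advance one k-group along the advance dimension ...
    tile_column += 1;

    if (kPartitionsK > 1) {
      ++k_group_idx;
      // ... and once this partition's k-groups are consumed, jump over the
      // k-groups owned by the remaining (kPartitionsK - 1) partitions.
      if (k_group_idx == kGroupPerTile) {
        k_group_idx = 0;
        tile_column += kGroupPerTile * (kPartitionsK - 1);
      }
    }
  }
  return 0;
}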
{ "file_path": "include/cutlass/gemm/warp/mma_simt_tile_iterator.h", "repo_id": "include", "token_count": 21069 }
33
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief layouts needed by Ampere fp64 tensor core kernels. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace layout { //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). struct TensorOpMultiplicandCongruous64b { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Static constants // static int const kElementSize = 64; static int const kElementsPerAccess = 1; private: // // Data members // /// Stride data member. Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCongruous64b(Index ldm = 0) : stride_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCongruous64b(Stride stride) : stride_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicandCongruous64b packed(TensorCoord const &extent) { return TensorOpMultiplicandCongruous64b(extent[0]); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { int tc = coord.contiguous() / 16; int ts = coord.strided() / 4; int c = coord.contiguous() % 16; int s = coord.strided() % 4; int bank = ((((c & 1) * 4 + (c & 6) / 2)) ^ (s & 1)) * 2 + (c / 8); int row = (c & 6) / 2; bank ^= ((s & 2) * 2); LongIndex offset = tc * 16 + bank + (ts * 4 + row) * stride_[0]; return offset; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return extent[1] * stride_[0]; } CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { return TensorCoord(); } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a column-major view of pitch-linear memory to /// TensorOpMultiplicand struct ColumnMajorTensorOpMultiplicandCongruous64b { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCongruous64b; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCongruous64b(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCongruous64b(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static ColumnMajorTensorOpMultiplicandCongruous64b packed(TensorCoord const &extent) { return ColumnMajorTensorOpMultiplicandCongruous64b(extent.row()); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.row(), coord.column())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.contiguous(), coord.strided()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a row-major view of pitch-linear memory to /// TensorOpMultiplicand struct RowMajorTensorOpMultiplicandCongruous64b { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCongruous64b; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCongruous64b(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCongruous64b(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static RowMajorTensorOpMultiplicandCongruous64b packed(TensorCoord const &extent) { return RowMajorTensorOpMultiplicandCongruous64b(extent.column()); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.column(), coord.row())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.strided(), coord.contiguous()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). 
struct TensorOpMultiplicand64bCrosswise { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Static constants // static int const kElementSize = 64; static int const kElementsPerAccess = 1; private: // // Data members // /// Stride data member. Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicand64bCrosswise(Index ldm = 0) : stride_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicand64bCrosswise(Stride stride) : stride_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicand64bCrosswise packed(TensorCoord const &extent) { return TensorOpMultiplicand64bCrosswise(extent[0]); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { int tc = coord.contiguous() / 16; int ts = coord.strided() / 16; int c = coord.contiguous() % 16; int s = coord.strided() % 16; int k_group = c / 4; int access_s = s / 2; int row = access_s % 4; int bank = ((k_group & 2) << 2) ^ ((s % 2) << 3) + (c % 4) * 2 + (access_s / 4) ^ (k_group & 1); int smem_row = (k_group * 4 + row) + tc * 16; int smem_col = ts * 16 + bank; LongIndex offset = smem_row * stride_[0] + smem_col; return offset; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return extent[1] * stride_[0]; } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). struct ColumnMajorTensorOpMultiplicand64bCrosswise { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicand64bCrosswise; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicand64bCrosswise(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicand64bCrosswise(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static ColumnMajorTensorOpMultiplicand64bCrosswise packed(TensorCoord const &extent) { return ColumnMajorTensorOpMultiplicand64bCrosswise(extent.column()); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.row(), coord.column())); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). struct RowMajorTensorOpMultiplicand64bCrosswise { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicand64bCrosswise; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicand64bCrosswise(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicand64bCrosswise(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static RowMajorTensorOpMultiplicand64bCrosswise packed(TensorCoord const &extent) { return RowMajorTensorOpMultiplicand64bCrosswise(extent.row()); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.column(), coord.row())); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). struct TensorOpMultiplicandCongruous128b { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Static constants // static int const kElementSize = 128; static int const kElementsPerAccess = 1; private: // // Data members // /// Stride data member. 
Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCongruous128b(Index ldm = 0) : stride_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCongruous128b(Stride stride) : stride_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicandCongruous128b packed(TensorCoord const &extent) { return TensorOpMultiplicandCongruous128b(extent[0]); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { Index tc = coord.contiguous() / 8; Index ts = coord.strided() / 4; Index c = coord.contiguous() % 8; Index s = coord.strided() % 4; Index k_index = (c / 2); Index bank = (((c & 1) * 4) | (s ^ k_index)); LongIndex offset = tc * 8 + bank + (ts * 4 + k_index) * stride_[0]; return offset; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return extent[1] * stride_[0]; } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { return TensorCoord(); } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a column-major view of pitch-linear memory to /// TensorOpMultiplicand struct ColumnMajorTensorOpMultiplicandCongruous128b { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCongruous128b; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCongruous128b(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCongruous128b(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static ColumnMajorTensorOpMultiplicandCongruous128b packed(TensorCoord const &extent) { return ColumnMajorTensorOpMultiplicandCongruous128b(extent.row()); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.row(), coord.column())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.contiguous(), coord.strided()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a row-major view of pitch-linear memory to /// TensorOpMultiplicand struct RowMajorTensorOpMultiplicandCongruous128b { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCongruous128b; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCongruous128b(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCongruous128b(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static RowMajorTensorOpMultiplicandCongruous128b packed(TensorCoord const &extent) { return RowMajorTensorOpMultiplicandCongruous128b(extent.column()); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.column(), coord.row())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.strided(), coord.contiguous()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). 
struct TensorOpMultiplicandCrosswise128x4 { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Static constants // static int const kElementSize = 128; static int const kElementsPerAccess = 1; private: // // Data members // /// Stride data member. Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCrosswise128x4(Index ldm = 0) : stride_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCrosswise128x4(Stride stride) : stride_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicandCrosswise128x4 packed(TensorCoord const &extent) { return TensorOpMultiplicandCrosswise128x4(extent[0]); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { Index tc = coord.contiguous() / 8; Index ts = coord.strided() / 8; Index c = coord.contiguous() % 8; Index s = coord.strided() % 8; Index liq = c % 4; Index bank = liq + ((s & 1) * 4) ^ (c & 4); Index k_index = (c & 4) + (s / 4) * 2 + ((s & 2) / 2); LongIndex offset = (tc * 8 + k_index) * stride_[0] + ts * 8 + bank; return offset; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return extent[1] * stride_[0]; } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a column-major view of pitch-linear memory to /// TensorOpMultiplicand struct ColumnMajorTensorOpMultiplicandCrosswise128x4 { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCrosswise128x4; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCrosswise128x4(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCrosswise128x4(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static ColumnMajorTensorOpMultiplicandCrosswise128x4 packed(TensorCoord const &extent) { return ColumnMajorTensorOpMultiplicandCrosswise128x4(extent.column()); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.row(), coord.column())); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a row-major view of pitch-linear memory to /// TensorOpMultiplicand struct RowMajorTensorOpMultiplicandCrosswise128x4 { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCrosswise128x4; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCrosswise128x4(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCrosswise128x4(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static RowMajorTensorOpMultiplicandCrosswise128x4 packed(TensorCoord const &extent) { return RowMajorTensorOpMultiplicandCrosswise128x4(extent.row()); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.column(), coord.row())); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace layout } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
// Source: include/cutlass/layout/tensor_op_multiplicand_sm80.h
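// ---------------------------------------------------------------------------
// Usage sketch (assumes the CUTLASS include directory is on the compiler's
// include path; it compiles as plain host C++ because the layout methods are
// CUTLASS_HOST_DEVICE). It evaluates TensorOpMultiplicandCongruous64b, defined
// in the header above, for a few logical (contiguous, strided) coordinates
// with an assumed leading dimension of 64 elements, which makes the XOR-based
// bank swizzle visible in the printed offsets.
// ---------------------------------------------------------------------------
#include <cstdio>

#include "cutlass/layout/tensor_op_multiplicand_sm80.h"

int main() {
  // ldm = 64 is an arbitrary, assumed leading dimension for illustration.
  cutlass::layout::TensorOpMultiplicandCongruous64b layout(64);

  for (int s = 0; s < 4; ++s) {
    for (int c = 0; c < 8; ++c) {
      long long offset =
          static_cast<long long>(layout(cutlass::layout::PitchLinearCoord(c, s)));
      printf("(c=%d, s=%d) -> offset %lld\n", c, s, offset);
    }
  }
  return 0;
}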
{ "file_path": "include/cutlass/layout/tensor_op_multiplicand_sm80.h", "repo_id": "include", "token_count": 9385 }
34
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Kernel performing a reduction over one or more ranks of an affine tensor */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/fast_math.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/device_kernel.h" #include "cutlass/reduction/device/tensor_reduce_affine_strided.h" #include "cutlass/reduction/device/tensor_reduce_affine_contiguous.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace reduction { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Tensor reduction operator on specific CUTLASS layouts over exactly one index template < typename ElementOutput_, typename ElementSource_, typename Layout_, typename ReductionOp_, int VectorLength_ = 1, typename ElementCompute_ = ElementOutput_ > struct TensorReduction { using ElementOutput = ElementOutput_; using ElementSource = ElementSource_; using Layout = Layout_; using ReductionOp = ReductionOp_; static int const kVectorLength = VectorLength_; using ElementCompute = ElementCompute_; using TensorCoord = typename Layout::TensorCoord; /// Reduction operator using ReductionDeviceStridedOperator = TensorReductionAffineStrided< 4, 3, ElementOutput, ElementSource, ReductionOp, kVectorLength, ElementCompute >; using ReductionDeviceContiguousOperator = TensorReductionAffineContiguous< 4, 3, ElementOutput, ElementSource, ReductionOp, kVectorLength, ElementCompute >; // // Data members // ReductionDeviceStridedOperator reduction_strided; ReductionDeviceContiguousOperator reduction_contiguous; int reduction_index; // // Methods // /// TensorReduction( TensorCoord extent, int reduction_index_ ): reduction_index(reduction_index_) { Coord<4> extent_affine; switch (reduction_index) { case 0: extent_affine[0] = extent[1]; extent_affine[1] = extent[2]; extent_affine[2] = extent[0]; extent_affine[3] = extent[3]; break; case 1: extent_affine[0] = extent[0]; extent_affine[1] = extent[2]; extent_affine[2] = extent[1]; extent_affine[3] = extent[3]; break; case 2: extent_affine[0] = extent[0]; extent_affine[1] = extent[1]; extent_affine[2] = extent[2]; extent_affine[3] = extent[3]; break; case 3: extent_affine[0] = extent[0]; extent_affine[1] = extent[1]; extent_affine[2] = extent[2]; extent_affine[3] = extent[3]; break; default: break; } if (reduction_index == 3) { reduction_contiguous = ReductionDeviceContiguousOperator(extent_affine); } else { reduction_strided = ReductionDeviceStridedOperator(extent_affine); } } /// Simple check to verify the object is initialized correctly bool good() const { if (reduction_index == 3) { return reduction_contiguous.good(); } return reduction_strided.good(); } /// Size of one workspace int64_t workspace_stride() const { if (reduction_index == 3) { return reduction_contiguous.workspace_stride(); } else { return reduction_strided.workspace_stride(); } } /// Returns the size (in bytes) of a temporary workspace needed for reduction across CTAs int64_t workspace_size() const { if (reduction_index == 3) { return reduction_contiguous.workspace_size(); } else { return reduction_strided.workspace_size(); } } /// Helper to use overloaded function call operator Status reduce( TensorRef<ElementOutput, Layout> dst_ref, TensorRef<ElementSource, Layout> src_ref, void *device_workspace_ptr = nullptr, ElementCompute reduction_identity = ElementCompute(), ReductionOp reduction_op = ReductionOp(), 
cudaStream_t stream = nullptr) { int64_t src_stride[3]; int64_t dst_stride[3]; switch (reduction_index) { case 0: src_stride[0] = src_ref.stride()[1]; src_stride[1] = src_ref.stride()[0]; src_stride[2] = src_ref.stride()[2]; dst_stride[0] = dst_ref.stride()[1]; dst_stride[1] = dst_ref.stride()[0]; break; case 1: src_stride[0] = src_ref.stride()[2]; src_stride[1] = src_ref.stride()[0]; src_stride[2] = src_ref.stride()[1]; dst_stride[0] = dst_ref.stride()[2]; dst_stride[1] = dst_ref.stride()[0]; break; case 2: src_stride[0] = src_ref.stride()[2]; src_stride[1] = src_ref.stride()[1]; src_stride[2] = src_ref.stride()[0]; dst_stride[0] = dst_ref.stride()[2]; dst_stride[1] = dst_ref.stride()[1]; break; case 3: src_stride[0] = src_ref.stride()[2]; src_stride[1] = src_ref.stride()[1]; src_stride[2] = src_ref.stride()[0]; dst_stride[0] = dst_ref.stride()[2]; dst_stride[1] = dst_ref.stride()[1]; dst_stride[2] = dst_ref.stride()[0]; default: break; } if (reduction_index == 3) { return reduction_contiguous( dst_ref.data(), dst_stride, src_ref.data(), src_stride, device_workspace_ptr, reduction_identity, reduction_op, stream); } else { return reduction_strided( dst_ref.data(), dst_stride, src_ref.data(), src_stride, device_workspace_ptr, reduction_identity, reduction_op, stream); } } Status operator()( TensorRef<ElementOutput, Layout> dst_ref, TensorRef<ElementSource, Layout> src_ref, void *device_workspace_ptr = nullptr, ElementCompute reduction_identity = ElementCompute(), ReductionOp reduction_op = ReductionOp(), cudaStream_t stream = nullptr) { return reduce( dst_ref, src_ref, device_workspace_ptr, reduction_identity, reduction_op, stream); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace reduction } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
// Source: include/cutlass/reduction/device/tensor_reduce.h
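// ---------------------------------------------------------------------------
// Usage sketch, not a canonical example: it assumes the CUTLASS utility
// headers (cutlass/util/*, shipped under tools/util) are available and elides
// error handling. It reduces a 4-D NHWC tensor over its channel rank
// (reduction_index == 3, which selects the contiguous specialization above)
// with a sum. The tensor extents are arbitrary, assumed values.
// ---------------------------------------------------------------------------
#include <cstdint>

#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/reduction/device/tensor_reduce.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/device_memory.h"

int run_channel_sum() {
  using Layout      = cutlass::layout::TensorNHWC;
  using ReductionOp = cutlass::plus<float>;
  using TensorReduction =
      cutlass::reduction::device::TensorReduction<float, float, Layout, ReductionOp>;

  cutlass::Tensor4DCoord extent(2, 8, 8, 32);                   // N, H, W, C (assumed)
  cutlass::HostTensor<float, Layout> src(extent);               // allocates host + device
  cutlass::HostTensor<float, Layout> dst(cutlass::Tensor4DCoord(2, 8, 8, 1));

  // Reduce over the last (contiguous) rank.
  TensorReduction reduction(extent, /*reduction_index=*/3);

  // Temporary workspace for cross-CTA partial results.
  cutlass::device_memory::allocation<uint8_t> workspace(reduction.workspace_size());

  cutlass::Status status = reduction(
      dst.device_ref(), src.device_ref(), workspace.get(),
      /*reduction_identity=*/0.0f, ReductionOp());

  return status == cutlass::Status::kSuccess ? 0 : 1;
}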
{ "file_path": "include/cutlass/reduction/device/tensor_reduce.h", "repo_id": "include", "token_count": 2944 }
35
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines a structure containing strides and a pointer to tensor data. TensorView is derived from TensorRef and contributes bounds to the tensor's index space. Thus, it is a complete mathematical object and may be used in tensor algorithms. It is decoupled from data storage and is therefore lightweight and may be embedded in larger tensor objects or memory structures. See cutlass/tensor_ref.h for more details about the mapping of the logical tensor index space to linear memory. 
*/ #pragma once #if !defined(__CUDACC_RTC__) #include <cmath> #endif #include "cutlass/cutlass.h" #include "cutlass/tensor_ref.h" namespace cutlass { //////////////////////////////////////////////////////////////////////////////////////////////////// template < /// Data type of element stored within tensor typename Element_, /// Maps a Coord<Rank_> in the logical tensor index space to the internal n-D array typename Layout_ > class TensorView : public TensorRef<Element_, Layout_> { public: /// Base tensor reference using Base = cutlass::TensorRef<Element_, Layout_>; /// Mapping function from logical coordinate to internal n-D array using Layout = Layout_; /// TensorRef pointing to constant memory using ConstTensorRef = typename Base::ConstTensorRef; /// Underlying TensorRef type using TensorRef = Base; /// Data type of individual access using Element = Element_; /// Reference type to an element using Reference = Element &; /// Logical rank of tensor index space static int const kRank = Layout::kRank; /// Index type using Index = typename Layout::Index; /// Long index used for pointer offsets using LongIndex = typename Layout::LongIndex; /// Coordinate in logical tensor space using TensorCoord = typename Layout::TensorCoord; /// Coordinate in storage n-D array using Stride = typename Layout::Stride; /// TensorView pointing to constant memory using ConstTensorView = TensorView< typename platform::remove_const<Element>::type const, Layout>; /// TensorView pointing to non-constant memory using NonConstTensorView = TensorView< typename platform::remove_const<Element>::type, Layout>; /// Require at least rank=1. Mathematically, a rank=0 tensor would be considered to be a /// scalar, but degenerate cases such as these are difficult to accommodate without /// extensive C++ metaprogramming or support for zero-length arrays. static_assert(kRank > 0, "Cannot define a zero-rank TensorRef"); private: /// View extent TensorCoord extent_; public: // // Methods // /// Constructs a TensorView object CUTLASS_HOST_DEVICE TensorView() { } /// Constructs a TensorView object CUTLASS_HOST_DEVICE TensorView( Element *ptr, ///< pointer to start of tensor Layout const &layout, ///< layout object containing stride and mapping function TensorCoord const &extent ///< size of the view in logical coordinates ): Base(ptr, layout), extent_(extent) { } /// Constructs a TensorView object CUTLASS_HOST_DEVICE TensorView( TensorRef const &ref, ///< pointer and layout object referencing a tensor TensorCoord const &extent ///< logical size of tensor ): Base(ref), extent_(extent) { } /// Converting constructor from TensorRef to non-constant data. CUTLASS_HOST_DEVICE TensorView( NonConstTensorView const &view ///< TensorView to non-const data ): Base(view), extent_(view.extent_) { } /// Updates the pointer and layout object CUTLASS_HOST_DEVICE void reset(Element* ptr, Layout const &layout, TensorCoord const &extent) { Base::reset(ptr, layout); this->resize(extent); } /// Updates the pointer CUTLASS_HOST_DEVICE void reset(Element* ptr) { Base::reset(ptr); } /// Changes the size of the view without affecting pointer or layout CUTLASS_HOST_DEVICE void resize(TensorCoord const &extent) { this->extent_ = extent; } /// Returns the extent of the view (the size along each logical dimension). CUTLASS_HOST_DEVICE TensorCoord const& extent() const { return extent_; } /// Returns the extent along a particular logical dimension. 
CUTLASS_HOST_DEVICE Index extent(int dim) const { return extent_.at(dim); } /// Returns the number of logical elements CUTLASS_HOST_DEVICE LongIndex size() const { return extent_.product(); } /// Determines whether a location is within a tensor CUTLASS_HOST_DEVICE bool contains(TensorCoord const& coord) const { CUTLASS_PRAGMA_UNROLL for (int dim = 0; dim < kRank; ++dim) { if (!(coord[dim] >= 0 && coord[dim] < extent(dim))) { return false; } } return true; } /// Returns a TensorRef pointing to the first element of the tensor. CUTLASS_HOST_DEVICE TensorRef ref() const { return TensorRef(this->data(), this->layout()); } /// Returns a TensorRef pointing to the first element of the tensor. CUTLASS_HOST_DEVICE ConstTensorRef const_ref() const { return ConstTensorRef(this->data(), this->layout()); } /// Returns a TensorView to const data CUTLASS_HOST_DEVICE ConstTensorView const_view() const { return ConstTensorView(const_ref(), extent_); } /// Returns a Tensor_view given location and size quantities CUTLASS_HOST_DEVICE TensorView subview( TensorCoord extent, ///< extent of the resulting view TensorCoord const& location = TensorCoord() ///< resulting view's origin within the old view ) const { TensorView result(this->ref(), extent.clamp(extent_ - location)); result.add_coord_offset(location); return result; } /// Returns the number of scalar elements needed to store tensor. CUTLASS_HOST_DEVICE size_t capacity() const { return Base::layout().capacity(extent_); } /// Returns a TensorView offset by a given amount CUTLASS_HOST_DEVICE TensorView operator+( TensorCoord const& b ///< offset in the logical coordinate space of the tensor ) const { TensorView result(*this); result.add_pointer_offset(this->offset(b)); return result; } /// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorView& operator+=( TensorCoord const& b ///< offset in the logical coordinate space of the tensor ) { this->add_pointer_offset(this->offset(b)); return *this; } /// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorView operator-( TensorCoord const& b ///< offset in the logical coordinate space of the tensor ) const { TensorRef result(*this); result.add_pointer_offset(-this->offset(b)); return result; } /// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorView& operator-=( TensorCoord const& b ///< offset in the logical coordinate space of the tensor ) { this->add_pointer_offset(-this->offset(b)); return *this; } }; //////////////////////////////////////////////////////////////////////////////////////////////////// /// Constructs a TensorRef, deducing types from arguments. template < typename Element, typename Layout > CUTLASS_HOST_DEVICE TensorView<Element, Layout> make_TensorView( Element *ptr, Layout const &layout, typename Layout::TensorCoord const &extent) { return TensorView<Element, Layout>(ptr, layout, extent); } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass
// Source: include/cutlass/tensor_view.h
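// ---------------------------------------------------------------------------
// Host-only usage sketch with assumed sizes: wraps a small row-major matrix in
// a TensorView (defined in the header above), checks bounds with contains(),
// and takes a 2x2 subview that aliases the parent storage. The buffer and
// extents are arbitrary choices for illustration.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

#include "cutlass/layout/matrix.h"
#include "cutlass/tensor_view.h"

int main() {
  int const kRows = 4;
  int const kCols = 6;
  std::vector<float> storage(kRows * kCols, 1.0f);

  cutlass::layout::RowMajor layout =
      cutlass::layout::RowMajor::packed({kRows, kCols});

  cutlass::TensorView<float, cutlass::layout::RowMajor> view(
      storage.data(), layout, cutlass::MatrixCoord(kRows, kCols));

  printf("contains (3, 5): %d\n", int(view.contains({3, 5})));   // inside  -> 1
  printf("contains (4, 0): %d\n", int(view.contains({4, 0})));   // outside -> 0

  // A 2x2 window whose origin is (1, 2) in the parent view; it shares storage.
  auto window = view.subview(cutlass::MatrixCoord(2, 2), cutlass::MatrixCoord(1, 2));
  window.at({0, 0}) = 7.0f;   // writes parent element (1, 2)

  printf("parent (1, 2) = %f\n", double(view.at({1, 2})));
  return 0;
}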
{ "file_path": "include/cutlass/tensor_view.h", "repo_id": "include", "token_count": 2969 }
36
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates calculating the address and predicates to the load of tiles from pitch-linear rank=2 tensors. This iterator uses masks to guard out-of-bounds accesses. The first tile this iterator visits maybe partial, then the remaining tiles are complete. So, we only need to compute the predicates twice, once before the first tile and once for the remaining full tiles which can share the same predicates. A precomputed "Params" object minimizes the amount of state that must be stored in registers, and integer addition is used to advance the pointer through memory. 
*/ #pragma once #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/permute.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/matrix_shape.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h" //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// PredicatedTileAccessIteratorPredicates /// template <typename Shape_, typename Element_, typename Layout_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIteratorPredicates { public: using Shape = Shape_; using Element = Element_; using Layout = Layout_; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorCoord = typename Layout::TensorCoord; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); static int const kPredicatesPerByte = 4; static int const kPredicatesPerWord = 4 * kPredicatesPerByte; static int const kPredicateCount = ThreadMap::Iterations::kCount * kAccessesPerVector; /// Number of 32b words containing predicates static int const kPredicateByteCount = (kPredicateCount + kPredicatesPerByte - 1) / kPredicatesPerByte; static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4; static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u; static_assert(kPredicateWordCount <= 4, "Too many predicates."); /// Predicate vector stores mask to guard accesses using Mask = Array<uint32_t, kPredicateWordCount>; // private: /// Guard predicates uint32_t predicates_[kPredicateWordCount]; /// Size of tensor TensorCoord extent_; /// Initial offset for each thread TensorCoord thread_offset_; /// Offset to the first steady-state tile TensorCoord residue_offset_; /// Iteration along vectors implied by the thread map int iteration_vector_; /// Iteration in the contiguous dimension int iteration_contiguous_; /// Iteration in the strided dimension int iteration_strided_; public: /// Computes predicates based on internally tracked per-thread offset. 
CUTLASS_DEVICE void compute_predicates_( /// Extent of the matrix window TensorCoord extent, /// optionally, simplify predicate calculation during 'steady state' phase bool is_steady_state = false) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = 0u; } CUTLASS_PRAGMA_UNROLL for (int access_idx = 0; access_idx < ThreadMap::Iterations::kCount * kAccessesPerVector; ++access_idx) { int s = access_idx / (ThreadMap::Iterations::kContiguous * kAccessesPerVector); int access_residual = access_idx % (ThreadMap::Iterations::kContiguous * kAccessesPerVector); int c = access_residual / kAccessesPerVector; int v = access_residual % kAccessesPerVector; TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous + v * AccessType::kElements, s * ThreadMap::Delta::kStrided); TensorCoord coord = thread_offset_ + iteration_coord; bool guard; if (is_steady_state) { if (kAdvanceRank == 0) { guard = (coord.strided() < extent.strided()); } else { guard = (coord.contiguous() < extent.contiguous()); } } else { guard = (coord.strided() < extent.strided() && coord.contiguous() < extent.contiguous()); } int pred_idx = v + kAccessesPerVector * (c + ThreadMap::Iterations::kContiguous * s); int word_idx = pred_idx / kPredicatesPerWord; int residual = pred_idx % kPredicatesPerWord; int byte_idx = residual / kPredicatesPerByte; int bit_idx = residual % kPredicatesPerByte; predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx)); } } CUTLASS_HOST_DEVICE void set_predicates(int thread_id, TensorCoord const &threadblock_offset) { TensorCoord residue_extent; if (kAdvanceRank) { typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.strided()) % Shape::kStrided; if (!residue_size) { residue_size = Shape::kStrided; } residue_offset_ = make_Coord(0, residue_size); residue_extent = make_Coord( extent_.contiguous(), min(threadblock_offset.strided() + residue_size, extent_.strided()) ); } else { typename TensorCoord::Index residue_size = (extent_[kAdvanceRank] - threadblock_offset.contiguous()) % Shape::kContiguous; if (!residue_size) { residue_size = Shape::kContiguous; } residue_offset_ = make_Coord(residue_size, 0); residue_extent = make_Coord( min(extent_.contiguous(), threadblock_offset.contiguous() + residue_size), extent_.strided() ); } // Per-thread offset in logical coordinates of tensor thread_offset_ = threadblock_offset + ThreadMap::initial_offset(thread_id); compute_predicates_(residue_extent, false); set_iteration_index(0); } /// Default constructor PredicatedTileAccessIteratorPredicates() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorPredicates( /// Extent of tensor TensorCoord extent) : extent_(extent) { } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iteration_vector_ = index % kAccessesPerVector; int residual_access = index / kAccessesPerVector; iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedTileAccessIteratorPredicates &operator++() { return *this; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = enable ? 
0u : predicates_[i]; } } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = 0xffffffff; } } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = mask[i]; } } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { mask[i] = predicates_[i]; } } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() const { int pred_idx = iteration_vector_ + kAccessesPerVector * (iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous); int word_idx = pred_idx / kPredicatesPerWord; int residual = pred_idx % kPredicatesPerWord; int byte_idx = residual / kPredicatesPerByte; int bit_idx = residual % kPredicatesPerByte; bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0; return pred; } }; //////////////////////////////////////////////////////////////////////////////// /// PredicatedTileAccessIterator /// template <typename Shape, typename Element, typename Layout, int AdvanceRank, typename ThreadMap, typename AccessType, bool Gather = false, typename PermuteLayout = layout::NoPermute> class PredicatedTileAccessIterator; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for pitch-linear data. /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, bool Gather, typename PermuteLayout> class PredicatedTileAccessIterator<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, AccessType_, Gather, PermuteLayout> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::PitchLinear; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates< Shape, Element, Layout, AdvanceRank, ThreadMap, AccessType>; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); static bool constexpr Permute = !platform::is_same<PermuteLayout, layout::NoPermute>::value && !platform::is_same<PermuteLayout, layout::InversePermute<layout::NoPermute>>::value; using Mask = typename UnderlyingPredicates::Mask; /// Uses a non-template class struct Params : PredicatedTileAccessIteratorParams { using Base = PredicatedTileAccessIteratorParams; /// Default constructor Params() = default; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : Base(layout.stride(0), MakePredicatedTileAccessIteratorDesc<Shape, 
Element, Layout, kAdvanceRank, ThreadMap>()() ) { } CUTLASS_HOST_DEVICE Params(Base const &base) : Base(base) { } }; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char *; private: // // Data members // UnderlyingPredicates the_predicates; /// Parameters object with precomputed internal state Params params_; /// Internal pointer to first access of tile BytePointer pointer_; /// Used for out-of-order visitation bool is_residue_tile_; /// Below is used when Gather is turned on. We need to record strided_offset /// and contiguous_offset separated to compute the offset by using /// /// offset = contiguous_offset + indices[strided_offset] /// Gather indices int const *indices_; /// Function to perform layout permutation and offset computation PermuteLayout permute_layout_; /// Tracks thread's coordinate offset in the matrix for current tile. /// This is only used in the following cases: /// - when Gather is true, strided coordinate needed to access indices (contiguous offset is tracked via pointer_) /// - when Permute is true, both coordinates are neeeded as input into permutation function (pointer_ is fixed) TensorCoord coord_offset_; private: /// Computes predicates based on internally tracked per-thread offset. CUTLASS_DEVICE void compute_predicates_( /// Extent of the matrix window TensorCoord extent, /// optionally, simplify predicate calculation during 'steady state' phase bool is_steady_state = false) { the_predicates.compute_predicates_(extent, is_steady_state); } public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset, /// Gather indices int const *indices = nullptr) : params_(params), pointer_(reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(pointer))), the_predicates(extent), is_residue_tile_(true), indices_(indices), permute_layout_(TensorCoord(extent.contiguous(), extent.strided()), params.stride_) { the_predicates.set_predicates(thread_id, threadblock_offset); if (Gather) { assert(indices_); } // update internal pointers Layout layout(params_.stride_); if (!Gather && !Permute) { add_pointer_offset(layout(the_predicates.thread_offset_)); } else { coord_offset_ = the_predicates.thread_offset_; if (!Permute) { add_pointer_offset(layout(make_Coord(coord_offset_.contiguous(), 0))); } } } /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { the_predicates.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += sizeof_bits<Element>::value * pointer_offset / 8; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_DEVICE void 
add_tile_offset( TensorCoord const &tile_offset) { if (is_residue_tile_) { the_predicates.thread_offset_ += the_predicates.residue_offset_; the_predicates.compute_predicates_(the_predicates.extent_, true); Layout layout(params_.stride_); if (!Gather && !Permute) { add_pointer_offset(layout(the_predicates.residue_offset_)); if (kAdvanceRank) { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided() - 1); pointer_ += Shape::kContiguous * tile_offset.contiguous() * sizeof_bits<Element>::value / 8; } else { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous() - 1); pointer_ += Shape::kStrided * tile_offset.strided() * sizeof_bits<Element>::value / 8; } } else { coord_offset_.strided() = the_predicates.thread_offset_.strided() + Shape::kStrided * (tile_offset.strided() - kAdvanceRank); if (!Permute) { add_pointer_offset(layout(make_Coord(the_predicates.residue_offset_.contiguous(), 0))); add_pointer_offset(Shape::kContiguous * (tile_offset.contiguous() - (1 - kAdvanceRank))); } else { coord_offset_.contiguous() = the_predicates.thread_offset_.contiguous() + Shape::kContiguous * (tile_offset.contiguous() - (1 - kAdvanceRank)); } } } else { if (!Gather && !Permute) { if (kAdvanceRank) { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided()); pointer_ += Shape::kContiguous * tile_offset.contiguous(); } else { pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous()); pointer_ += Shape::kStrided * tile_offset.strided(); } } else { coord_offset_.strided() += Shape::kStrided * tile_offset.strided(); if (!Permute) { add_pointer_offset(Shape::kContiguous * tile_offset.contiguous()); } else { coord_offset_.contiguous() += Shape::kContiguous * tile_offset.contiguous(); } } } is_residue_tile_ = false; } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { if (Gather || Permute) { if (!valid()) { return nullptr; } Index coord_contig = (Permute ? coord_offset_.contiguous() : 0) + the_predicates.iteration_contiguous_ * ThreadMap::Delta::kContiguous + the_predicates.iteration_vector_ * AccessType::kElements; Index coord_strided = coord_offset_.strided() + the_predicates.iteration_strided_ * ThreadMap::Delta::kStrided; if (Gather) { coord_strided = indices_[coord_strided]; } LongIndex offset = Permute ? permute_layout_(TensorCoord(coord_contig, coord_strided)) : (coord_strided * LongIndex(params_.stride_) + coord_contig); return reinterpret_cast<AccessType *>(pointer_ + OffsetBytes<Element>(offset)); } return reinterpret_cast<AccessType *>( pointer_ + the_predicates.iteration_contiguous_ * (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) / 8) + the_predicates.iteration_vector_; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { the_predicates.operator++(); ++the_predicates.iteration_vector_; if (the_predicates.iteration_vector_ < kAccessesPerVector) { return *this; } the_predicates.iteration_vector_ = 0; ++the_predicates.iteration_contiguous_; if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } // Enter here only if (iteration_contiguous_ == ThreadMap::Iteration::kContiguous) the_predicates.iteration_contiguous_ = 0; ++the_predicates.iteration_strided_; if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) { if (!Gather && !Permute) { pointer_ += params_.inc_strided_; } return *this; } // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) // which means we enter the next tile. 
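    // (Nothing is required here for the Gather / Permute paths: get() recomputes the byte
    // address from coord_offset_ and the current iteration indices on every access, so only the
    // plain pointer-walking path below performs end-of-tile pointer bookkeeping.)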
the_predicates.iteration_strided_ = 0; if (!Gather && !Permute) { // advance to next tile pointer_ += params_.inc_next_; // now return to start tile - if the iterator is subsequently advanced, this // subtraction as well as the subsequent integer addition are both elided by // the compiler. pointer_ -= params_.inc_advance_; } return *this; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { the_predicates.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { the_predicates.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { the_predicates.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() const { return the_predicates.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for column-major data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, bool Gather, typename PermuteLayout> class PredicatedTileAccessIterator<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, AccessType_, Gather, PermuteLayout> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, AccessType, Gather, PermuteLayout>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default constructor Params() = default; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))){}; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const &base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.row(), extent.column()), thread_id, layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()), indices) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for row-major data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, bool Gather, typename PermuteLayout> class PredicatedTileAccessIterator<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, AccessType_, Gather, PermuteLayout> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType, Gather, PermuteLayout>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default constructor Params() = default; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))){}; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const &base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset, /// Gather indices int const *indices = nullptr) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.column(), extent.row()), thread_id, layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()), indices) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for affine rank 2 data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIterator<Shape_, Element_, layout::AffineRankN<2>, AdvanceRank, ThreadMap_, AccessType_, false, layout::NoPermute> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::AffineRankN<2>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates< Shape, Element, layout::PitchLinear, AdvanceRank, ThreadMap, AccessType>; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingPredicates::Mask; /// Parameters object is precomputed state and is host-constructible class Params { public: friend PredicatedTileAccessIterator; private: /// stride of pitch-linear layout (units of Element) Coord<Layout::kStrideRank, Layout::LongIndex> stride_; /// amount (in byte) to increment pointer to move to next access along /// contiguous dimension LongIndex inc_contiguous_; /// amount (in byte) to increment pointer from first access of current /// contiguous dimension to first access of next one. LongIndex inc_strided_; /// amount (in byte) to increment pointer from last access of current /// contiguous dimension to first access of next one. 
LongIndex inc_next_strided_; /// amount (in byte) to increment pointer from last access to first access /// of next tile LongIndex inc_next_; /// amount (in byte) to increment pointer from first access of current tile /// to first access of next tile LongIndex inc_advance_; public: // Default ctor CUTLASS_HOST_DEVICE Params(): stride_(0), inc_contiguous_(0), inc_strided_(0), inc_next_(0), inc_advance_(0) { } /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : stride_({layout.stride(0), layout.stride(1)}) { inc_contiguous_ = (LongIndex(stride_[0]) * ThreadMap::Delta::kContiguous) * sizeof_bits<Element>::value / 8; inc_strided_ = (LongIndex(stride_[1]) * ThreadMap::Delta::kStrided) * sizeof_bits<Element>::value / 8; inc_next_strided_ = inc_strided_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_; if (kAdvanceRank) { // advance along strided dimension inc_advance_ = Shape::kStrided * LongIndex(stride_[1]) * sizeof_bits<Element>::value / 8; } else { // advance along contiguous dimension inc_advance_ = Shape::kContiguous * stride_[0] * sizeof_bits<Element>::value / 8; } inc_next_ = inc_advance_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_ - LongIndex(ThreadMap::Iterations::kStrided - 1) * inc_strided_; }; }; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char *; // // Data members // /// Parameters object with precomputed internal state Params params_; /// Internal pointer to first access of tile BytePointer pointer_; UnderlyingPredicates the_predicates; /// Used for out-of-order visitation bool is_residue_tile_; private: /// Computes predicates based on internally tracked per-thread offset. CUTLASS_DEVICE void compute_predicates_( /// Extent of the matrix window TensorCoord extent, /// optionally, simplify predicate calculation during 'steady state' phase bool is_steady_state = false) { the_predicates.compute_predicates_(extent, is_steady_state); } public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ) : params_(params), pointer_(reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(pointer))), the_predicates(extent), is_residue_tile_(true) { the_predicates.set_predicates(thread_id, threadblock_offset); // update internal pointers Layout layout(params_.stride_); add_pointer_offset(layout(the_predicates.thread_offset_)); } /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { the_predicates.set_iteration_index(index); } /// Adds a pointer 
offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += sizeof_bits<Element>::value * pointer_offset / 8; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { if (is_residue_tile_) { the_predicates.thread_offset_ += the_predicates.residue_offset_; Layout layout(params_.stride_); add_pointer_offset(layout(the_predicates.residue_offset_)); the_predicates.compute_predicates_(the_predicates.extent_, true); if (kAdvanceRank) { pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1] - 1); pointer_ += Shape::kContiguous * tile_offset[0]; } else { pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0] - 1); pointer_ += Shape::kStrided * tile_offset[1]; } } else { if (kAdvanceRank) { pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1]); pointer_ += Shape::kContiguous * tile_offset[0]; } else { pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0]); pointer_ += Shape::kStrided * tile_offset[1]; } } is_residue_tile_ = false; } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(pointer_) + the_predicates.iteration_vector_; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { the_predicates.operator++(); ++the_predicates.iteration_vector_; if (the_predicates.iteration_vector_ < kAccessesPerVector) { return *this; } the_predicates.iteration_vector_ = 0; ++the_predicates.iteration_contiguous_; if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { pointer_ += params_.inc_contiguous_; return *this; } // Enter here only if (iteration_contiguous_ == // ThreadMap::Iteration::kContiguous) the_predicates.iteration_contiguous_ = 0; ++the_predicates.iteration_strided_; if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) { pointer_ += params_.inc_next_strided_; return *this; } // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) // which means we enter the next tile. the_predicates.iteration_strided_ = 0; // advance to next tile pointer_ += params_.inc_next_; // now return to start tile - if the iterator is subsequently advanced, this // subtraction as well as the subsequent integer addition are both elided by // the compiler. pointer_ -= params_.inc_advance_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { the_predicates.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { the_predicates.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { the_predicates.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return the_predicates.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for affine rank 2 column-major data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIterator<Shape_, Element_, layout::AffineRank2ColumnMajor, AdvanceRank, ThreadMap_, AccessType_, false, layout::NoPermute> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::AffineRank2ColumnMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; // Map to the underlying AffineRankN<2> layout using UnderlyingIterator = PredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::AffineRankN<2>, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default constructor Params() = default; /// Construct the Params object given an AffineRankN<2> tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))){}; }; private: // // Data members // /// Underlying AffineRankN<2> tile iterator UnderlyingIterator iterator_; public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.row(), extent.column()), thread_id, layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset(make_Coord(tile_offset.row(), tile_offset.column())); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for affine rank-2 row-major data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIterator<Shape_, Element_, layout::AffineRank2RowMajor, AdvanceRank, ThreadMap_, AccessType_, false, layout::NoPermute> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::AffineRank2RowMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; // Map to the underlying AffineRankN<2> layout using UnderlyingIterator = PredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::AffineRankN<2>, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default constructor Params() = default; /// Construct the Params object given an AffineRankN<2> tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))){}; }; private: // // Data members // /// Underlying AffineRankN<2> tile iterator UnderlyingIterator iterator_; public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.column(), extent.row()), thread_id, layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset(make_Coord(tile_offset.column(), tile_offset.row())); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for column-major interleaved data. /// It is mapped to the congruous layout. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, int InterleavedK> class PredicatedTileAccessIterator<Shape_, Element_, layout::ColumnMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap_, AccessType_, false, layout::NoPermute> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; static int const kInterleavedK = InterleavedK; using Layout = layout::ColumnMajorInterleaved<kInterleavedK>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kRow * kInterleavedK, Shape::kColumn / kInterleavedK>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default constructor Params() = default; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))) {} CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const &base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.row() * kInterleavedK, extent.column() / kInterleavedK), thread_id, layout::PitchLinearCoord( threadblock_offset.row() * kInterleavedK, threadblock_offset.column() / kInterleavedK)) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator for row-major interleaved data. // It is mapped to the congruous layout. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_, int InterleavedK> class PredicatedTileAccessIterator<Shape_, Element_, layout::RowMajorInterleaved<InterleavedK>, AdvanceRank, ThreadMap_, AccessType_, false, layout::NoPermute> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; static int const kInterleavedK = InterleavedK; using Layout = layout::RowMajorInterleaved<kInterleavedK>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedTileAccessIterator< layout::PitchLinearShape<Shape::kColumn * kInterleavedK, Shape::kRow / kInterleavedK>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType>; static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default constructor Params() = default; /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))) {} CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const &base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Default constructor PredicatedTileAccessIterator() = default; /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset, int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization ) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.column() * kInterleavedK, extent.row() / kInterleavedK), thread_id, layout::PitchLinearCoord( threadblock_offset.column() * kInterleavedK, threadblock_offset.row() / kInterleavedK)) {} /// Construct a PredicatedTileAccessIterator with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator operator++(int) { PredicatedTileAccessIterator self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
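/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Illustrative usage sketch: one plausible way to instantiate the pitch-linear
// PredicatedTileAccessIterator and walk every guarded access of a tile. The tile shape, element
// type, thread count, and the choice of PitchLinearStripminedThreadMap are illustrative
// assumptions, not values required by this header; the `example` namespace and `visit_tile`
// function are hypothetical names. The loop nest mirrors the order in which operator++()
// advances the iterator: vector, then contiguous, then strided.
//
/////////////////////////////////////////////////////////////////////////////////////////////////

#include "cutlass/transform/pitch_linear_thread_map.h"  // only needed for this example's ThreadMap

namespace example {

using Shape      = cutlass::layout::PitchLinearShape<128, 8>;    // 128 contiguous x 8 strided
using Element    = float;
using Layout     = cutlass::layout::PitchLinear;
using ThreadMap  = cutlass::transform::PitchLinearStripminedThreadMap<Shape, 32, 4>;
using AccessType = cutlass::AlignedArray<Element, 4>;            // 16B vectorized accesses

using Iterator = cutlass::transform::threadblock::PredicatedTileAccessIterator<
    Shape, Element, Layout, /*AdvanceRank=*/1, ThreadMap, AccessType>;

// Host side (illustrative): Iterator::Params params(Layout(leading_dimension));

CUTLASS_DEVICE
void visit_tile(
    Iterator::Params const &params,
    Element *pointer,
    cutlass::layout::PitchLinearCoord extent,
    int thread_id) {

  Iterator iterator(params, pointer, extent, thread_id,
                    cutlass::layout::PitchLinearCoord(0, 0));

  CUTLASS_PRAGMA_UNROLL
  for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
    CUTLASS_PRAGMA_UNROLL
    for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
      CUTLASS_PRAGMA_UNROLL
      for (int v = 0; v < Iterator::kAccessesPerVector; ++v) {

        if (iterator.valid()) {
          AccessType const *access = iterator.get();   // predicate-guarded address
          (void)access;                                // a real kernel would issue a load here
        }
        ++iterator;                                    // next access within the current tile
      }
    }
  }

  // Advance one whole tile along the strided (advance) rank. The first call also applies the
  // residue-tile adjustment performed by add_tile_offset() above.
  iterator.add_tile_offset(cutlass::layout::PitchLinearCoord(0, 1));
}

} // namespace example

/////////////////////////////////////////////////////////////////////////////////////////////////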
include/cutlass/transform/threadblock/predicated_tile_access_iterator.h/0
{ "file_path": "include/cutlass/transform/threadblock/predicated_tile_access_iterator.h", "repo_id": "include", "token_count": 25112 }
37
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of tiles from pitch-linear rank=2 tensors. This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile first, with the objective of minimizing predicate mask updates during steady-state operation. A precomputed "Params" object minimizes the amount of state that must be stored in registers, and integer addition is used to advance the pointer through memory. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "regular_tile_iterator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape, typename Element, typename Layout, int AdvanceRank, typename ThreadMap, int Alignment = sizeof_bits<Element>::value * ThreadMap::kElementsPerAccess / 8 > class RegularTileIterator2dThreadTile; /// Regular tile iterator specialized for pitch-linear + 2d thread-tiled threadmapping template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment > class RegularTileIterator2dThreadTile<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, Alignment> { public: using Shape = Shape_; using Element = Element_; using Layout = layout::PitchLinear; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using StrideIndex = typename Layout::Stride::Index; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>; static_assert(kAdvanceRank == 0 || kAdvanceRank == 1, "Advance rank may only be along the contiguous or strided dimensions."); private: // // Types // using AccessType = AlignedArray<Element, ThreadMap::ThreadAccessShape::kCount, kAlignment>; // // Data members // /// Pointer to memory uint8_t *pointer_; /// Stride quantity StrideIndex stride_; /// Amount to increment pointer along strided dimension LongIndex increment_strided_; /// Amount to advance pointer between tiles LongIndex increment_advance_; public: CUTLASS_DEVICE RegularTileIterator2dThreadTile(): pointer_(nullptr), increment_strided_(0), increment_advance_(0) { } CUTLASS_DEVICE RegularTileIterator2dThreadTile( TensorRef const &ref, int thread_idx, int interleave ){ TensorCoord t = ThreadMap::initial_offset(thread_idx); long int offset = t[0] * interleave + t[1] * ref.stride()[0]/interleave; pointer_ = reinterpret_cast<uint8_t *>(ref.data() + offset); stride_ = ref.stride()[0] / interleave; increment_strided_ = (ref.stride()[0] * sizeof_bits<Element>::value / 8) * ThreadMap::Delta::kStrided / interleave; increment_advance_ = (kAdvanceRank == 0 ? 
Shape::kContiguous * sizeof_bits<Element>::value / 8 : Shape::kStrided * (ref.stride()[0] * sizeof_bits<Element>::value / 8) / interleave); } /// Loads a fragment CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); uint8_t const *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8; CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_pointer); CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int idx = c + s * ThreadMap::Iterations::kContiguous; frag_ptr[idx] = access_ptr[c * ThreadMap::Delta::kContiguous / ThreadMap::ThreadAccessShape::kStrided]; } if (s + 1 < ThreadMap::Iterations::kStrided) { byte_pointer += increment_strided_; } } } /// Loads a fragment CUTLASS_HOST_DEVICE void load(Fragment &frag, TensorCoord const & tile_offset) { load_with_pointer_offset( frag, tile_offset.contiguous() * Shape::kContiguous / ThreadMap::kElementsPerAccess + tile_offset.strided() * Shape::kStrided * stride_ ); } /// Loads a fragment CUTLASS_HOST_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Stores a fragment CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const*>(&frag); uint8_t *byte_pointer = pointer_ + pointer_offset * sizeof_bits<Element>::value / 8; CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_pointer); CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int idx = c + s * ThreadMap::Iterations::kContiguous; access_ptr[c * ThreadMap::Delta::kContiguous / ThreadMap::ThreadAccessShape::kStrided] = frag_ptr[idx]; } if (s + 1 < ThreadMap::Iterations::kStrided) { byte_pointer += increment_strided_; } } } /// Stores a fragment CUTLASS_HOST_DEVICE void store(Fragment const &frag, TensorCoord const & tile_offset) { store_with_pointer_offset( frag, tile_offset.contiguous() * Shape::kContiguous + tile_offset.strided() * Shape::kStrided * stride_ ); } /// Stores a fragment CUTLASS_HOST_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } /// Advances the pointer CUTLASS_HOST_DEVICE RegularTileIterator2dThreadTile &operator++() { pointer_ += increment_advance_; return *this; } /// Advances the pointer CUTLASS_HOST_DEVICE RegularTileIterator2dThreadTile &operator--() { pointer_ -= increment_advance_; return *this; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset; } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { int offset = sizeof_bits<Element>::value * (coord.contiguous() * Shape::kContiguous + coord.strided() * Shape::kStrided * stride_) / 8; add_pointer_offset(offset); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Regular tile iterator specialized for interleaved layout + 2d thread-tiled threadmapping template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment > class RegularTileIterator2dThreadTile<Shape_, Element_, layout::RowMajorInterleaved<4>, AdvanceRank, ThreadMap_, Alignment> { public: using Shape = Shape_; using Element = 
Element_; using Layout = layout::RowMajorInterleaved<4>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>; using Underlying = RegularTileIterator2dThreadTile< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, kAlignment >; static_assert(kAdvanceRank == 0 || kAdvanceRank == 1, "Advance rank may only be along the row or column dimensions."); private: Underlying iterator_; public: CUTLASS_DEVICE RegularTileIterator2dThreadTile() { } CUTLASS_DEVICE RegularTileIterator2dThreadTile( TensorRef const &ref, int thread_idx ): iterator_({ref.data(), ref.stride()}, thread_idx, 4) { } /// Loads a fragment CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment CUTLASS_HOST_DEVICE void load(Fragment &frag, TensorCoord const & tile_offset) { iterator_.load_with_pointer_offset(frag, {tile_offset.column(), tile_offset.row()}); } /// Loads a fragment CUTLASS_HOST_DEVICE void load(Fragment &frag) { iterator_.load_with_pointer_offset(frag, 0); } /// Stores a fragment CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Stores a fragment CUTLASS_HOST_DEVICE void store(Fragment const &frag, TensorCoord const & tile_offset) { iterator_.store_with_pointer_offset(frag, {tile_offset.column(), tile_offset.row()}); } /// Stores a fragment CUTLASS_HOST_DEVICE void store(Fragment const &frag) { iterator_.store_with_pointer_offset(frag, 0); } /// Advances the pointer CUTLASS_HOST_DEVICE RegularTileIterator2dThreadTile &operator++() { ++iterator_; return *this; } /// Advances the pointer CUTLASS_HOST_DEVICE RegularTileIterator2dThreadTile &operator--() { --iterator_; return *this; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.column(), coord.row()}); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Regular tile iterator specialized for interleaved layout + 2d thread-tiled threadmapping template < typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment > class RegularTileIterator2dThreadTile<Shape_, Element_, layout::ColumnMajorInterleaved<4>, AdvanceRank, ThreadMap_, Alignment> { public: using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajorInterleaved<4>; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Fragment = Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>; using PitchLinearThreadMap = PitchLinearStripminedThreadMap< 
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, ThreadMap::kThreads, ThreadMap::ThreadAccessShape::kCount >; using Underlying = RegularTileIterator2dThreadTile< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap >; static_assert(kAdvanceRank == 0 || kAdvanceRank == 1, "Advance rank may only be along the row or column dimensions."); private: Underlying iterator_; public: CUTLASS_DEVICE RegularTileIterator2dThreadTile() { } CUTLASS_DEVICE RegularTileIterator2dThreadTile( TensorRef const &ref, int thread_idx ): iterator_({ref.data(), ref.stride()}, thread_idx, 4) { } /// Loads a fragment CUTLASS_HOST_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment CUTLASS_HOST_DEVICE void load(Fragment &frag, TensorCoord const & tile_offset) { iterator_.load_with_pointer_offset(frag, {tile_offset.row(), tile_offset.column()}); } /// Loads a fragment CUTLASS_HOST_DEVICE void load(Fragment &frag) { iterator_.load_with_pointer_offset(frag, 0); } /// Stores a fragment CUTLASS_HOST_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Stores a fragment CUTLASS_HOST_DEVICE void store(Fragment const &frag, TensorCoord const & tile_offset) { iterator_.store_with_pointer_offset(frag, {tile_offset.row(), tile_offset.column()}); } /// Stores a fragment CUTLASS_HOST_DEVICE void store(Fragment const &frag) { iterator_.store_with_pointer_offset(frag, 0); } /// Advances the pointer CUTLASS_HOST_DEVICE RegularTileIterator2dThreadTile &operator++() { ++iterator_; return *this; } /// Advances the pointer CUTLASS_HOST_DEVICE RegularTileIterator2dThreadTile &operator--() { --iterator_; return *this; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.row(), coord.column()}); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass
include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h/0
{ "file_path": "include/cutlass/transform/threadblock/regular_tile_iterator_pitch_linear_2dthreadtile.h", "repo_id": "include", "token_count": 5171 }
38
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# import ctypes from typing import Union from cuda import cuda from cutlass_library import SubstituteTemplate import numpy as np from cutlass_library import ( ConvKindNames, ConvKindTag, DataTypeNames, DataTypeSize, DataTypeTag, IteratorAlgorithmNames, IteratorAlgorithmTag, LayoutTag, LayoutType, MathOperation, MathOperationTag, OpcodeClass, OpcodeClassNames, OpcodeClassTag, OperationKind, ShortDataTypeNames, ShortLayoutTypeNames, SplitKMode, StrideSupport, StrideSupportTag, SwizzlingFunctor, SwizzlingFunctorTag, get_complex_from_real, ) from cutlass.backend.arguments import ArgumentBase from cutlass.backend.c_types import dim3_, get_conv2d_arguments from cutlass.backend.library import ( EmissionType, TensorDescription, TileDescription, ) from cutlass.backend.memory_manager import device_mem_alloc from cutlass.backend.operation import ExecutableOperation, LaunchConfiguration from cutlass.backend.utils.device import to_device_ptr from cutlass.shape import GemmCoord class Conv2dArguments(ArgumentBase): """ Argument wrapper for Conv2d. It encodes problem information and user-provide tensors into the kernel's argument. 
:param operation: the Conv2d operation to take the argument :type operation: :class:`cutlass.backend.Conv2dOperation` :param problem_size: the Conv2d problem size :type problem_size: :class:`cutlass.shape.Conv2dProblemSize` :param A: tensor A :type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray :param B: tensor B :type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray :param C: tensor C :type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray :param D: tensor D :type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray :param split_k_mode: conv2d split K mode, defaults to cutlass_library.library.SplitKMode.Serial :type split_k_mode: cutlass_library.library.SplitKMode, optional :param output_op: output operator, optional :type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments` :param stream: cuda stream, defaults to cuda.cuda.CUstream(0) :type stream: :class:`cuda.cuda.CUstream` """ def __init__(self, operation, problem_size, A, B, C, D, split_k_mode=SplitKMode.Serial, **kwargs, ) -> None: self.operation = operation self.conv_kind = operation.conv_kind self.layout_A = operation.A.layout self.layout_B = operation.B.layout self.layout_C = operation.C.layout self.element_A = operation.A.element self.element_B = operation.B.element self.element_C = operation.C.element if self.layout_C == LayoutType.TensorNC32HW32: raise Exception("Layout type TensorNC32HW32 is not currently supported") super().__init__(A, B, C, D, **kwargs) if "split_k_slices" in kwargs.keys() and kwargs["split_k_slices"] > 1: self.split_k_mode = split_k_mode self.split_k_slices = kwargs["split_k_slices"] else: self.split_k_mode = SplitKMode.Serial self.split_k_slices = 1 if "output_op" in kwargs.keys() and self.split_k_mode != SplitKMode.Parallel: self.output_op = kwargs["output_op"] else: self.output_op = self.operation.epilogue_type(1.0, 0.0) self.problem_size = problem_size self.problem_size.split_k_slices = self.split_k_slices self.initialize() def get_arguments(self): tc_numel = -1 if hasattr(self, "tensor_c_numel"): tc_numel = self.tensor_c_numel self.c_arguments = self.operation.argument_type( int(self.conv_kind), self.problem_size.ctype, int(to_device_ptr(self.ptr_A)), int(to_device_ptr(self.ptr_B)), int(to_device_ptr(self.ptr_C)), int(to_device_ptr(self.ptr_D)), tc_numel, self.output_op, int(self.split_k_mode) ) def initialize(self): self.launch_config = self.operation.rt_module.plan(self) self.get_arguments() # Allocate and initialize device workspace device_workspace_size = self.operation.rt_module.get_workspace_size(self.c_arguments) if device_workspace_size > 0: self.workspace_buffer = device_mem_alloc(device_workspace_size) workspace_ptr = self.workspace_buffer.ptr err, = cuda.cuMemsetD32( workspace_ptr, 0, device_workspace_size // 4) else: workspace_ptr = None self.semaphore = 0 if workspace_ptr is not None and self.split_k_mode == SplitKMode.Parallel: self.ptr_D = workspace_ptr # Reset arguments now that ptr_D has been updated self.get_arguments() elif workspace_ptr is not None and self.split_k_mode == SplitKMode.Serial: self.semaphore = workspace_ptr params_ = self.operation.rt_module.get_args( self.c_arguments, ctypes.c_void_p(int(self.semaphore))) self.host_workspace = bytearray(params_.contents) self.device_workspace = None def sync(self): """ Synchronize the arguments. If the input tensor is in host, copy it from device to host. 
""" return super().sync() class Conv2dRT(ExecutableOperation): """ Conv2dRT manages the CUTLASS runtime components """ KernelTemplate = r""" extern "C" __global__ void ${operation_name}(${operation_name}${operation_suffix}::Params params) { // Dynamic shared memory base pointer extern __shared__ int SharedStorageBase[]; // Declare pointer to dynamic shared memory. ${operation_name}${operation_suffix}::SharedStorage *shared_storage = reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase); ${operation_name}${operation_suffix} op; op(params, *shared_storage); } """ HostTemplate = r""" extern "C" { // Get the size of params in bytes int ${operation_name}_get_param_size(){ return sizeof(${operation_name}${operation_suffix}::Params); } // Get the size of dynamic shared memory in bytes int ${operation_name}_shared_memory_size() { return int(sizeof(${operation_name}${operation_suffix}::SharedStorage)); } using ElementA = typename ${operation_name}_base::ElementA; using ElementB = typename ${operation_name}_base::ElementB; using ElementC = typename ${operation_name}_base::ElementC; using LayoutA = typename ${operation_name}_base::LayoutA; using LayoutB = typename ${operation_name}_base::LayoutB; using LayoutC = typename ${operation_name}_base::LayoutC; using EpilogueOutputOp = typename ${operation_name}_base::EpilogueOutputOp; struct ${operation_name}_TemporaryArgs { int conv_kind; cutlass::conv::Conv2dProblemSize problem_size; ElementA* ptr_A; ElementB* ptr_B; ElementC* ptr_C; ElementC* ptr_D; int tensor_c_numel; typename EpilogueOutputOp::Params epilogue_params; int split_k_mode; }; typename ${operation_name}${operation_suffix}::Arguments construct_arguments(${operation_name}_TemporaryArgs args) { cutlass::conv::Operator conv_operator = static_cast<cutlass::conv::Operator>(args.conv_kind); auto tc_A = cutlass::conv::implicit_gemm_tensor_a_extent(conv_operator, args.problem_size); auto tc_B = cutlass::conv::implicit_gemm_tensor_b_extent(conv_operator, args.problem_size); auto tc_C = cutlass::conv::implicit_gemm_tensor_c_extent(conv_operator, args.problem_size); auto tc_D = cutlass::conv::implicit_gemm_tensor_c_extent(conv_operator, args.problem_size); auto size_C = tc_C.at(0) * tc_C.at(1) * tc_C.at(2) * tc_C.at(3); if (args.tensor_c_numel >= 0 && args.tensor_c_numel == tc_C.at(3) && args.tensor_c_numel < size_C) { // C is interpreted as bias tc_C = {0, 0, 0, 0}; } cutlass::TensorRef<ElementA, LayoutA> tref_A(args.ptr_A, LayoutA::packed(tc_A)); cutlass::TensorRef<ElementB, LayoutA> tref_B(args.ptr_B, LayoutB::packed(tc_B)); cutlass::TensorRef<ElementC, LayoutA> tref_C(args.ptr_C, LayoutC::packed(tc_C)); cutlass::TensorRef<ElementC, LayoutA> tref_D(args.ptr_D, LayoutC::packed(tc_D)); return { args.problem_size, tref_A, tref_B, tref_C, tref_D, args.epilogue_params, static_cast<cutlass::conv::SplitKMode>(args.split_k_mode) }; } // Get the params as byte array char* ${operation_name}_get_params(${operation_name}_TemporaryArgs args, int *semaphore=nullptr) { auto arguments = construct_arguments(args); typename ${operation_name}${operation_suffix}::Params* params; params = new ${operation_name}${operation_suffix}::Params(arguments, semaphore); char *bytes = ((char*)(params)); char *output = new char[sizeof(${operation_name}${operation_suffix}::Params)]; for (unsigned int i = 0; i < sizeof(${operation_name}${operation_suffix}::Params); i ++) output[i] = bytes[i]; return output; } dim3 ${operation_name}_get_grid_shape( int conv_kind, cutlass::conv::Conv2dProblemSize 
problem_size, cutlass::gemm::GemmCoord tile_size, int split_k_slices ) { using Swizzle = typename ${operation_name}_base::ThreadblockSwizzle; auto tiled_shape = Swizzle::get_tiled_shape( static_cast<cutlass::conv::Operator>(conv_kind), problem_size, tile_size, split_k_slices); return Swizzle::get_grid_shape(tiled_shape); } size_t ${operation_name}_get_workspace_size(${operation_name}_TemporaryArgs args) { auto arguments = construct_arguments(args); // Temporarily define device::-level Conv2d so that we can call get_workspace_size using DeviceConv = cutlass::conv::device::ImplicitGemmConvolution<${operation_name}_base>; return DeviceConv::get_workspace_size(arguments); } } """ def __init__(self, operation: "Conv2dOperation"): super().__init__(operation) self.extra_funcs = { "get_grid_shape": dim3_, "get_workspace_size": ctypes.c_uint64 } self.argument_type, self.epilogue_type = get_conv2d_arguments(operation.epilogue_functor) self.argtype = [ctypes.POINTER(self.argument_type), ctypes.c_void_p] self.conv_kind = operation.conv_kind self.operation: Conv2dOperation = operation self.emitter = EmitConv2dInstance("_type") self.threads = operation.tile_description.num_threads self.swizzle_functor = operation.swizzling_functor def emit(self): return self.emitter.emit(self.operation) def plan(self, arguments: Conv2dArguments): tile_size = GemmCoord( self.operation.tile_description.threadblock_shape[0], self.operation.tile_description.threadblock_shape[1], self.operation.tile_description.threadblock_shape[2], ) grid = self.get_grid_shape( int(self.conv_kind), arguments.problem_size.ctype, tile_size.ctype, arguments.split_k_slices ) return LaunchConfiguration( [grid.x, grid.y, grid.z], [self.threads, 1, 1], self.shared_memory_capacity) def initialize(self): err, = cuda.cuFuncSetAttribute( self.kernel, attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, value=self.shared_memory_capacity) if err != cuda.CUresult.CUDA_SUCCESS: raise RuntimeError(f"CUDA Error: {err}") class Conv2dOperation: """ CUTLASS Conv2d operation description. 
:param conv_kind: convolution operator :type conv_kind: :class:`cutlass_library.library.ConvKind` :param iterator_algorithm: Selects among several implementation variants trading off performance with simplicity :type iterator_algorithm: :class:`cutlass_library.library.IteratorAlgorithm` :param arch: GPU compute capability (sm_xx) :type arch: int :param tile_description: tile description :type tile_description: :class:`cutlass.backend.TileDescription` :param A: tensor A description :type A: :class:`cutlass.backend.TensorDescription` :param B: tensor B description :type B: :class:`cutlass.backend.TensorDescription` :param C: tensor C description :type C: :class:`cutlass.backend.TensorDescription` :param D: tensor D description :type D: :class:`cutlass.backend.TensorDescription` :param element_epilogue: element type for computation in epilogue \ :type element_epilogue: cutlass_library.library.DataType :param stride_support: distinguish among partial specializations that \ accelerate certain problems where convolution stride is unit \ :type stride_support: :class:`cutlass_library.library.StrideSupport` :param epilogue_functor: convolution epilogue functor :type epilogue_functor: :class:`EpilogueFunctor` :param swizzling_functor: threadblock swizzling functor """ def __init__( self, conv_kind, iterator_algorithm, arch: int, tile_description: TileDescription, A: TensorDescription, B: TensorDescription, C: TensorDescription, stride_support, epilogue_functor, swizzling_functor=SwizzlingFunctor.Identity1, emission_type=EmissionType.Kernel, **kwargs ): self.operation_kind: OperationKind = OperationKind.Conv2d self.arch: int = arch self.tile_description: TileDescription = tile_description self.conv_kind = conv_kind self.A: TensorDescription = A self.B: TensorDescription = B self.C: TensorDescription = C self.epilogue_functor = epilogue_functor self.iterator_algorithm = iterator_algorithm self.stride_support = stride_support self.swizzling_functor = swizzling_functor self.emission_type = emission_type self.rt_module: Conv2dRT = Conv2dRT(self) self.argument_type = self.rt_module.argument_type self.epilogue_type = self.rt_module.epilogue_type def run(self, arguments: Conv2dArguments) -> cuda.CUresult: """ Launch the cuda kernel with input arguments :param arguments: conv2d arguments :type arguments: :class:`cutlass.backend.Conv2dArguments` """ # launch the kernel err = self.rt_module.run( arguments.host_workspace, arguments.device_workspace, arguments.launch_config, arguments.stream ) if err != cuda.CUresult.CUDA_SUCCESS: raise RuntimeError(f"CUDA Error {err}") return err # # Get function name # def procedural_name(self): """The full procedural name indicates architecture, extended name, tile size, and layout.""" return self.configuration_name() def configuration_name(self): """The full procedural name indicates architecture, extended name, tile size, and layout.""" opcode_class_name = OpcodeClassNames[ self.tile_description.math_instruction.opcode_class ] threadblock = "%dx%d_%dx%d" % ( self.tile_description.threadblock_shape[0], self.tile_description.threadblock_shape[1], self.tile_description.threadblock_shape[2], self.tile_description.stages, ) if self.stride_support == StrideSupport.Unity: configuration_name = "cutlass_sm${arch}_${opcode_class}_${extended_name}_${threadblock}_${layout}_unity_stride_align${alignment}" else: configuration_name = "cutlass_sm${arch}_${opcode_class}_${extended_name}_${threadblock}_${layout}_align${alignment}" return SubstituteTemplate( configuration_name, { "arch": 
str(self.arch), "opcode_class": opcode_class_name, "extended_name": self.extended_name(), "threadblock": threadblock, "layout": self.layout_name(), "alignment": "%d" % self.A.alignment }, ) def extended_name(self): """Append data types if they differ from compute type.""" if self.C.element != self.tile_description.math_instruction.element_accumulator and \ self.A.element != self.tile_description.math_instruction.element_accumulator: extended_name = "${element_c}_${core_name}_${element_a}" elif self.C.element == self.tile_description.math_instruction.element_accumulator and \ self.A.element != self.tile_description.math_instruction.element_accumulator: extended_name = "${core_name}_${element_a}" else: extended_name = "${core_name}" extended_name = SubstituteTemplate(extended_name, { "element_a": DataTypeNames[self.A.element], "element_c": DataTypeNames[self.C.element], "core_name": self.core_name(), }) return extended_name def layout_name(self): return "%s" % (ShortLayoutTypeNames[self.A.layout]) def core_name(self): """The basic operation kind is prefixed with a letter indicating the accumulation type.""" intermediate_type = "" if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp: inst_shape = "%dx%dx%d" % tuple( self.tile_description.math_instruction.instruction_shape) if self.tile_description.math_instruction.element_a != self.A.element and \ self.tile_description.math_instruction.element_a != self.accumulator_type(): intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] else: inst_shape = "" return "%s%s%s%s_%s" % ( ShortDataTypeNames[self.accumulator_type()], inst_shape, intermediate_type, ConvKindNames[self.conv_kind], IteratorAlgorithmNames[self.iterator_algorithm] ) def is_complex(self): complex_operators = [ MathOperation.multiply_add_complex, MathOperation.multiply_add_complex_gaussian, ] return self.tile_description.math_instruction.math_operation in complex_operators def accumulator_type(self): accum = self.tile_description.math_instruction.element_accumulator if self.is_complex(): return get_complex_from_real(accum) return accum def device_op(self): """ Returns a new Conv2dOperation object that is constructed with emission type ``EmissionType.Device``. 
:return: operation ready for device-level code emission :rtype: Conv2dOperation """ return Conv2dOperation( self.conv_kind, self.iterator_algorithm, self.arch, self.tile_description, self.A, self.B, self.C, self.stride_support, self.epilogue_functor, self.swizzling_functor, emission_type=EmissionType.Device) ################################################################################################### # # Emits single instances of a CUTLASS device-wide operator # ################################################################################################### class EmitConv2dInstance: def __init__(self, operation_suffix=""): self.operation_suffix = operation_suffix self.includes = [ "cutlass/cutlass.h", "cutlass/conv/kernel/default_conv2d_fprop.h", "cutlass/conv/kernel/default_conv2d_dgrad.h", "cutlass/conv/kernel/default_conv2d_wgrad.h", "cutlass/conv/device/implicit_gemm_convolution.h" ] self.template = """ // Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}" using ${operation_name}_base = typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}< ${element_a}, ${layout_a}, ${element_b}, ${layout_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}, ${swizzling_functor}, ${stages}, ${math_operator}, ${iterator_algorithm}, ${stride_support}, ${align_a}, ${align_b} >::Kernel; struct ${operation_name}${operation_suffix}: public ${operation_name}_base { }; """ self.template_device = """ // Conv2d operation ${operation_name} using Conv2d${conv_kind_name}Kernel = typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}< ${element_a}, ${layout_a}, ${element_b}, ${layout_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}, ${swizzling_functor}, ${stages}, ${math_operator}, ${iterator_algorithm}, ${stride_support}, ${align_a}, ${align_b} >::Kernel; using DeviceKernel = typename cutlass::conv::device::ImplicitGemmConvolution<Conv2d${conv_kind_name}Kernel>; """ def emit(self, operation): warp_shape = [int(operation.tile_description.threadblock_shape[idx] / operation.tile_description.warp_count[idx]) for idx in range(3)] epilogue_vector_length = int(min( operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) values = { "operation_name": operation.procedural_name(), "operation_suffix": self.operation_suffix, "conv_kind": ConvKindTag[operation.conv_kind], "conv_kind_name": ConvKindNames[operation.conv_kind].capitalize(), "element_a": DataTypeTag[operation.A.element], "layout_a": LayoutTag[operation.A.layout], "element_b": DataTypeTag[operation.B.element], "layout_b": LayoutTag[operation.B.layout], "element_c": DataTypeTag[operation.C.element], "layout_c": LayoutTag[operation.C.layout], "element_accumulator": DataTypeTag[operation.accumulator_type()], "opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], "arch": "cutlass::arch::Sm%d" % 
operation.arch, "threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]), "threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]), "threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]), "warp_shape_m": str(warp_shape[0]), "warp_shape_n": str(warp_shape[1]), "warp_shape_k": str(warp_shape[2]), "instruction_shape_m": str(operation.tile_description.math_instruction.instruction_shape[0]), "instruction_shape_n": str(operation.tile_description.math_instruction.instruction_shape[1]), "instruction_shape_k": str(operation.tile_description.math_instruction.instruction_shape[2]), "epilogue_vector_length": str(epilogue_vector_length), "epilogue_functor": operation.epilogue_functor.emit(), "swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor], "stages": str(operation.tile_description.stages), "iterator_algorithm": IteratorAlgorithmTag[operation.iterator_algorithm], "iterator_algorithm_name": IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(), "stride_support": StrideSupportTag[operation.stride_support], "math_operator": "cutlass::arch::OpMultiplyAddComplex" if operation.is_complex() else MathOperationTag[operation.tile_description.math_instruction.math_operation], "align_a": str(operation.A.alignment), "align_b": str(operation.B.alignment), } if operation.emission_type == EmissionType.Kernel: conv2d_template = self.template else: conv2d_template = self.template_device return SubstituteTemplate(conv2d_template, values)
python/cutlass/backend/conv2d_operation.py/0
{ "file_path": "python/cutlass/backend/conv2d_operation.py", "repo_id": "python", "token_count": 10771 }
39
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Layout algebras """ from pycute import Layout, composition, make_layout, flatten, product def _infer_split(old_shape, new_shape): old_shape = _tuple_to_list(old_shape) new_shape = _tuple_to_list(new_shape) if len(old_shape) == 0 and len(new_shape) == 0: return [] if len(old_shape) == 0: if product(tuple(new_shape)) != 1: raise ValueError("Invalid reshape size") else: return new_shape if len(new_shape) == 0: if product(tuple(old_shape)) != 1: raise ValueError("Invalid reshape size") else: return old_shape # This is done recursively by only process the last dimension at each time old_dim = old_shape[-1] new_dim = new_shape[-1] # Exact match if old_dim == new_dim: return _infer_split(old_shape[:-1], new_shape[:-1]) + [new_dim,] # Needs split if old_dim > new_dim and old_dim % new_dim == 0: residual = old_dim // new_dim return _infer_split(old_shape[:-1] + [residual,], new_shape[:-1]) + [new_dim,] # Needs merge if old_dim < new_dim and new_dim % old_dim == 0: residual = new_dim // old_dim return _infer_split(old_shape[:-1], new_shape[:-1] + [residual,]) + [old_dim,] raise NotImplementedError(f"Unsupported split: {old_shape} -> {new_shape}") def _infer_merge(flatten_shape, shape): flatten_shape = _tuple_to_list(flatten_shape) shape = _tuple_to_list(shape) idx_flat = 0 merged_shape = [] for dim in shape: # Exact match if dim == flatten_shape[idx_flat]: merged_shape.append(dim) idx_flat += 1 # Need group elif dim > flatten_shape[idx_flat] and dim % flatten_shape[idx_flat] == 0: residual = dim group = [] while(residual > 1): group.append(flatten_shape[idx_flat]) residual = residual // flatten_shape[idx_flat] idx_flat += 1 merged_shape.append(group) else: raise NotImplementedError(f"Unsupported merge: {flatten_shape} -> {shape}") 
return merged_shape def _list_to_tuple(nested_list): if isinstance(nested_list, list) or isinstance(nested_list, tuple): return tuple(_list_to_tuple(item) for item in nested_list) return nested_list def _tuple_to_list(nested_tuple): if isinstance(nested_tuple, list) or isinstance(nested_tuple, tuple): return list(_tuple_to_list(item) for item in nested_tuple) return nested_tuple def _reverse_tuple(nested_tuple: tuple): if isinstance(nested_tuple, tuple): return tuple([_reverse_tuple(item) for item in nested_tuple][::-1]) return nested_tuple def _get_first_lhs_nonzero_stride(stride_list, idx): for i in reversed(range(idx)): if stride_list[i] != 0: return i else: return None def _get_first_rhs_nonzero_stride(stride_list, idx): for i in range(idx+1, len(stride_list)): if stride_list[i] != 0: return i else: return None def reshape(layout, new_shape): """ General reshape of input layout. It takes two steps: 1. split the dimensions of the old layout 2. merge the splitted dimensions according to the new shape """ # # Step 1: Split the dimensions of the old layout # # 1.1 Flat old and new shape old_flatten_shape = list(flatten(layout.shape)) new_flatten_shape = list(flatten(new_shape)) # 1.2 Infer the flatten splitted shape splitted_flatten_shape = _infer_split(old_flatten_shape, new_flatten_shape) # 1.3 Unflat the splitted shape based on the old shape splited_shape = _infer_merge(splitted_flatten_shape, old_flatten_shape) # 1.4 Infer the type of each split # If the split type is in row-major (R), the dimension list is reversed because # the cute::composition only support column-major split split_type = [] # the type of each split (ColumnMajor or RowMajor) permuted_splitted_shape = [] old_flatten_stride = list(flatten(layout.stride)) for idx, dim in enumerate(splited_shape): if not isinstance(dim, list): permuted_splitted_shape.append(dim) split_type.append("C") else: lhs_stride = _get_first_lhs_nonzero_stride(old_flatten_stride, idx) rhs_stride = _get_first_rhs_nonzero_stride(old_flatten_stride, idx) # Special case for single tuple # Use column-major by default if lhs_stride is None and rhs_stride is None: permuted_splitted_shape.append(dim) split_type.append("C") else: if lhs_stride is not None and rhs_stride is not None: # We consider shape[idx]:stride[idx] # Case 1: stride[idx - 1] <= stride[idx] <= stride[idx + 1]: column major if lhs_stride <= old_flatten_stride[idx] and old_flatten_stride[idx] <= rhs_stride: permuted_splitted_shape.append(dim) split_type.append("C") # Case 2: stride[idx - 1] > stride[idx] > stride[idx + 1]: row major elif lhs_stride > old_flatten_stride[idx] and old_flatten_stride[idx] > rhs_stride: permuted_splitted_shape.append([d for d in reversed(dim)]) split_type.append("R") # Case 3: stride[idx - 1] <= stride[idx] > stride[idx + 1]: concave elif lhs_stride <= old_flatten_stride[idx] and old_flatten_stride[idx] > rhs_stride: if lhs_stride >= rhs_stride: permuted_splitted_shape.append(dim) split_type.append("C") else: permuted_splitted_shape.append([d for d in reversed(dim)]) split_type.append("R") # Case 4: stride[idx - 1] > stride[idx] <= stride[idx + 1]: concave elif lhs_stride > old_flatten_stride[idx] and old_flatten_stride[idx] <= rhs_stride: if lhs_stride >= rhs_stride: permuted_splitted_shape.append(dim) split_type.append("C") else: permuted_splitted_shape.append([d for d in reversed(dim)]) split_type.append("R") else: raise NotImplementedError() elif lhs_stride is None: # Case 1: dim's stride < dim+1's stride, expand in column major if old_flatten_stride[idx] > 
rhs_stride: permuted_splitted_shape.append([d for d in reversed(dim)]) split_type.append("R") else: permuted_splitted_shape.append(dim) split_type.append("C") else: # Case 1: dim's stride > dim-1's stride if old_flatten_stride[idx] < lhs_stride: permuted_splitted_shape.append([d for d in reversed(dim)]) split_type.append("R") else: permuted_splitted_shape.append(dim) split_type.append("C") # 1.4 Generate the splitted layout permuted_splitted_layout = composition(layout, Layout(_list_to_tuple(permuted_splitted_shape))) # 1.5 Reverse the permutation in 1.4 before merge splitted_shape = [] splitted_stride = [] for shape_dim, stride_dim, type in zip( permuted_splitted_layout.shape, permuted_splitted_layout.stride, split_type): if type == "C": splitted_shape.append(shape_dim) splitted_stride.append(stride_dim) else: splitted_shape.append(tuple([d for d in reversed(shape_dim)])) splitted_stride.append(tuple([d for d in reversed(stride_dim)])) splitted_layout = Layout(tuple(splitted_shape), tuple(splitted_stride)) # # Step 2: Merge the splitted dimensions according to the new shape # # 2.1 Merge layout merged_layout = composition(splitted_layout, Layout(new_shape)) # 2.2 Cleaning up output_layout = composition(merged_layout, Layout(new_shape)) return output_layout def permutation(layout, permutation): """ Permute the layout """ new_shape = tuple([layout.shape[idx] for idx in permutation]) new_stride = tuple([layout.stride[idx] for idx in permutation]) return Layout(new_shape, new_stride) def _broadcast(layout, new_shape): if len(layout) == 1 and isinstance(new_shape, int): old_dim = layout.shape old_stride = layout.stride new_dim = new_shape if old_dim == new_dim: return Layout(old_dim, old_stride) elif old_dim == 1: return Layout(new_dim, 0) else: raise NotImplementedError(f"Invalid Broadcast: {old_dim} -> {new_dim}") # Align the dimensions old_shape = layout.shape if isinstance(old_shape, int): old_shape = (old_shape,) sub_layouts = [layout,] else: sub_layouts = [sub_layout for sub_layout in layout] rhs_broadcast_layouts = [Layout(1, 0)] * (len(new_shape) - len(old_shape)) # Get the broadcasted layout broadcast_layouts = [] try: layout = make_layout(*sub_layouts, *rhs_broadcast_layouts) broadcast_layouts = [] for idx, sub_layout in enumerate(layout): broadcast_layouts.append(_broadcast(sub_layout, new_shape[idx])) except NotImplementedError: layout = make_layout(*rhs_broadcast_layouts, *sub_layouts) for idx, sub_layout in enumerate(layout): broadcast_layouts.append(_broadcast(sub_layout, new_shape[idx])) return make_layout(*broadcast_layouts) def broadcast(layout, new_shape): """ Broadcast the new layout based on the input shape The broadcasted shape equals to the new shape The stride of broadcasted dimensions are 0 """ return _broadcast(layout, new_shape) def debroadcast(layout, dims): """ Squeeze the 0-stride """ for dim in dims: if layout.stride[dim] != 0: raise ValueError(f"Dim{dim} cannot be debroadcasted as it has stride {layout.stride[dim]}") new_shape = tuple([s for idx, s in enumerate(layout.shape) if idx not in dims]) new_stride = tuple([s for idx, s in enumerate(layout.stride) if idx not in dims]) return Layout(new_shape, new_stride) def canonicalization_(shapes, strides): if isinstance(shapes, tuple): c_shapes = [] c_strides = [] for shape, stride in zip(shapes, strides): c_shape, c_stride = canonicalization_(shape, stride) c_shapes.append(c_shape) c_strides.append(c_stride) return tuple(c_shapes), tuple(c_strides) else: if shapes == 1: return 1, 0 else: return shapes, strides def 
canonicalization(layout): """ Canonicalize the input layout: set the stride of every extent-1 dimension to 0 """ new_shape, new_stride = canonicalization_(layout.shape, layout.stride) return Layout(new_shape, new_stride)
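A minimal sketch exercising the helpers defined above. It assumes the CUTLASS Python package (which bundles the `pycute` module this file imports from) is installed so the module is importable as `cutlass.backend.evt.ir.layout_algorithm`; all shapes and strides below are illustrative choices.

```python
from pycute import Layout

from cutlass.backend.evt.ir.layout_algorithm import (
    broadcast,
    canonicalization,
    permutation,
    reshape,
)

# Compact column-major (2, 3) layout: element (i, j) lives at offset i + 2 * j.
layout = Layout((2, 3), (1, 2))

print(reshape(layout, (6,)))              # merge both modes into one mode of size 6
print(permutation(layout, (1, 0)))        # swap the modes -> (3, 2):(2, 1)
print(broadcast(Layout(2, 1), (2, 4)))    # the broadcasted mode of size 4 gets stride 0
print(canonicalization(Layout((1, 5), (5, 1))))  # the extent-1 mode gets stride 0
```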
python/cutlass/backend/evt/ir/layout_algorithm.py/0
{ "file_path": "python/cutlass/backend/evt/ir/layout_algorithm.py", "repo_id": "python", "token_count": 5880 }
40
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Registry of elementwise epilogues Elementwise epilogues can be added to many CUTLASS kernels in the CUTLAS Python interface via code like the following for GEMM: .. highlight:: python .. code-block:: python plan = cutlass.op.Gemm(element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor) plan.activation = cutlass.epilogue.relu """ from cutlass.backend import epilogue gelu = epilogue.gelu hardswish = epilogue.hardswish identity = epilogue.identity leaky_relu = epilogue.leaky_relu relu = epilogue.relu sigmoid = epilogue.sigmoid silu = epilogue.silu tanh = epilogue.tanh _activations = [gelu, hardswish, identity, leaky_relu, relu, sigmoid, silu, tanh] def get_activations() -> list: """ Returns a list of available activation functions :return: list of available activation functions :rtype: list """ return _activations def get_activation_epilogue( activation, element_output, elements_per_access, element_accumulator, element_compute, ): """ Return an epilogue corresponding to the activation function, data types, and alignment used in the kernel :param activation: elementwise activation function to use :param element_output: data type of the output :param elements_per_access: alignment of operand C of the kernel :type elements_per_access: int :param element_accumulator: data type of the accumulated output C :param element_compute: data type in which compute operations should be performed :return: epilogue functor """ if activation not in _activations: raise Exception( f"Unsupported activation type {activation}. 
Available activations are: {_activations}" ) if activation == identity: return epilogue.LinearCombination( element_output, elements_per_access, element_accumulator, element_compute ) else: return epilogue.LinearCombinationGeneric( activation, element_output, elements_per_access, element_accumulator, element_compute, ) """ Frontend for EVT that generates epilogue functor through tracing the input function """ from cutlass.backend.evt.frontend import PythonASTFrontend def trace(fn, example_tensors, **kwargs): """ Trace `fn(**example_tensors)` and generates epilogue visitor :param fn: Python callables :param example_tensors: example inputs for fn :type example_tensors: dict .. hightlight:: python .. code-block:: python import cutlass.backend.evt # Define epilogue function as Python callable def example_fn(accum, C, alpha, beta, gamma): D = ((accum + C) * alpha - gamma) / beta return D # Define the example tensors example_inputs = { "accum": torch.empty(size=(6, 512, 512), dtype=torch.float16, device="cuda"), "C": torch.empty(size=(6, 512, 512), dtype=torch.float16, device="cuda"), "alpha": 1.5, "beta": 0.5, "gamma": 2.5, "D": torch.empty(size=(6, 512, 512), dtype=torch.float16, device="cuda") } # Generate the epilogue functor epilogue_visitor = cutlass.epilogue.trace(example_fn, example_inputs) """ if callable(fn): class EpilogueFunctor(PythonASTFrontend): def __init__(self, **kwargs): super().__init__(**kwargs) pass setattr(EpilogueFunctor, "__call__", staticmethod(fn)) epilogue_functor = EpilogueFunctor(**kwargs) epilogue_functor.trace(example_tensors) return epilogue_functor else: raise NotImplementedError("Expect a callable Python function")
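A minimal usage sketch, assuming the CUTLASS Python package is installed and that these helpers are re-exported under `cutlass.epilogue` (they are defined in the module above; the exact re-export path may differ between releases). The data types and access width chosen here are illustrative.

```python
import cutlass
from cutlass.epilogue import get_activation_epilogue, get_activations, relu

print(get_activations())  # the available activation functors listed above

# Build a ReLU epilogue for an f16 output with 8-element vector accesses,
# accumulating and computing in f32.
epilogue_functor = get_activation_epilogue(
    relu,
    element_output=cutlass.DataType.f16,
    elements_per_access=8,
    element_accumulator=cutlass.DataType.f32,
    element_compute=cutlass.DataType.f32,
)
```

In the high-level interface one would more commonly just set `plan.activation = cutlass.epilogue.relu` on a Gemm or Conv2d plan, as the module docstring shows, rather than constructing the epilogue functor directly.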
python/cutlass/epilogue/epilogue.py/0
{ "file_path": "python/cutlass/epilogue/epilogue.py", "repo_id": "python", "token_count": 1965 }
41
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Utilities for emitting Conv3d kernels """ import enum import logging import os.path import shutil from string import Template try: import builtins if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True: raise ImportError("Disabling attempt to import cutlass_library") from cutlass_library.library import * from cutlass_library.conv3x_emitter import EmitConv3xInstance, EmitConv3xIncludes except ImportError: from library import * from conv3x_emitter import EmitConv3xInstance, EmitConv3xIncludes _LOGGER = logging.getLogger(__name__) ################################################################################################### # class Conv3dOperation: # def __init__(self, conv_kind, iterator_algorithm, arch, tile_description, A, B, C, element_epilogue, \ stride_support, epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4): self.operation_kind = OperationKind.Conv3d self.arch = arch self.tile_description = tile_description self.conv_kind = conv_kind self.A = A self.B = B self.C = C self.element_epilogue = element_epilogue self.epilogue_functor = epilogue_functor self.iterator_algorithm = iterator_algorithm self.stride_support = stride_support self.swizzling_functor = swizzling_functor # def is_mixed_input(self): return self.A.element != self.B.element # def core_name(self): ''' The basic operation kind is prefixed with a letter indicating the accumulation type. 
''' intermediate_type = '' if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp: inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape) if self.tile_description.math_instruction.element_a != self.A.element and \ self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator: intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] else: inst_shape = '' return "%s%s%s%s3d_%s" % (ShortDataTypeNames[self.tile_description.math_instruction.element_accumulator], \ inst_shape, intermediate_type, ConvKindNames[self.conv_kind], IteratorAlgorithmNames[self.iterator_algorithm]) # def extended_name(self): ''' Append data types if they differ from compute type. ''' if self.C.element != self.tile_description.math_instruction.element_accumulator and \ self.A.element != self.tile_description.math_instruction.element_accumulator: extended_name = "${element_c}_${core_name}_${element_a}" elif self.C.element == self.tile_description.math_instruction.element_accumulator and \ self.A.element != self.tile_description.math_instruction.element_accumulator: extended_name = "${core_name}_${element_a}" else: extended_name = "${core_name}" extended_name = SubstituteTemplate(extended_name, { 'element_a': DataTypeNames[self.A.element], 'element_c': DataTypeNames[self.C.element], 'core_name': self.core_name() }) return extended_name # def configuration_name(self): ''' The full procedural name indicates architecture, extended name, tile size, and layout. ''' opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] threadblock = "%dx%d_%dx%d" % ( self.tile_description.threadblock_shape[0], self.tile_description.threadblock_shape[1], self.tile_description.threadblock_shape[2], self.tile_description.stages ) if self.stride_support == StrideSupport.Unity: configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_unity_stride" else: configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}" return SubstituteTemplate( configuration_name, { 'opcode_class': opcode_class_name, 'extended_name': self.extended_name(), 'threadblock': threadblock, } ) # def procedural_name(self): ''' The full procedural name indicates architecture, extended name, tile size, and layout. 
''' return self.configuration_name() ################################################################################################### # # Emits single instances of a CUTLASS device-wide operator # ################################################################################################### class EmitConv3dInstance: def __init__(self): # Emitter for CUTLASS 3 convolution operations self.conv3x_emitter = EmitConv3xInstance() self.template = """ // Conv3d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}" using ${operation_name}_base = typename cutlass::conv::kernel::DefaultConv3d${conv_kind_name}< ${element_a}, cutlass::layout::TensorNDHWC, ${element_b}, cutlass::layout::TensorNDHWC, ${element_c}, cutlass::layout::TensorNDHWC, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue} >, ${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>, ${stages}, cutlass::arch::OpMultiplyAdd, ${iterator_algorithm}, ${stride_support} >::Kernel; """ def emit(self, operation): _LOGGER.debug("*** EmitConv3dInstance::emit") _LOGGER.debug("*** operation: procedural_name()=" + operation.procedural_name()) if hasattr(operation, 'is_3x') and operation.is_3x: _LOGGER.debug("*** CUTLASS 3 operation") return self.conv3x_emitter.emit(operation) _LOGGER.debug("*** CUTLASS 2 operation") warp_shape = [int(operation.tile_description.threadblock_shape[idx] / operation.tile_description.warp_count[idx]) for idx in range(3)] epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) values = { 'operation_name': operation.procedural_name(), 'conv_kind': ConvKindTag[operation.conv_kind], 'conv_kind_name': ConvKindNames[operation.conv_kind].capitalize(), 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[operation.A.layout], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[operation.B.layout], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_vector_length': str(epilogue_vector_length), 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'swizzling_functor': 
SwizzlingFunctorTag[operation.swizzling_functor], 'stages': str(operation.tile_description.stages), 'iterator_algorithm': IteratorAlgorithmTag[operation.iterator_algorithm], 'iterator_algorithm_name': IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(), 'stride_support': StrideSupportTag[operation.stride_support] } return SubstituteTemplate(self.template, values) ################################################################################################### # # Generator functions for all layouts # ################################################################################################### # def GenerateConv3dTensorOp(manifest, tile_descriptions, min_cc, align = 128): for tile in tile_descriptions: for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]: if conv_kind == ConvKind.Fprop or (tile.math_instruction.element_accumulator in [DataType.f16, DataType.f32]): # output_types = [tile.math_instruction.element_a, tile.math_instruction.element_accumulator] \ if DataTypeSize[tile.math_instruction.element_accumulator] == 32 \ else [tile.math_instruction.element_accumulator,] for output_type in output_types: A = TensorDescription(tile.math_instruction.element_a, LayoutType.TensorNDHWC, int(align / DataTypeSize[tile.math_instruction.element_a])) B = TensorDescription(tile.math_instruction.element_b, LayoutType.TensorNDHWC, int(align / DataTypeSize[tile.math_instruction.element_b])) C = TensorDescription(output_type, LayoutType.TensorNDHWC, max(1, int(align / DataTypeSize[output_type]))) manifest.append(Conv3dOperation(conv_kind, min_cc, tile, A, B, C, tile.math_instruction.element_accumulator)) class EmitConv3dIncludes: '''Emit includes that are specific to the operation.''' def __init__(self): self.includes = ['conv3d_operation.h'] self.emitter_3x = EmitConv3xIncludes() def operation_is_3x(self, operation) -> bool: """Whether operation is a CUTLASS 3 convolution (as opposed to CUTLASS 2)""" return hasattr(operation, 'is_3x') and operation.is_3x def emit(self, operation) -> str: if self.operation_is_3x(operation): return self.emitter_3x.emit(operation) return '\n'.join(f"#include \"{incl}\"" for incl in self.includes) + \ "\n\n///////////////////////////////////////////////////////////////////////////////////////////////////" ################################################################################################### # # Emitters functions for all targets # ################################################################################################### class EmitConv3dConfigurationLibrary: def __init__(self, operation_path, configuration_name): self.configuration_name = configuration_name self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name) self.instance_emitter = EmitConv3dInstance() self.includes_emitter = EmitConv3dIncludes() self.header_template = """ /* Generated by conv3d_operation.py - Do not edit. 
*/ /////////////////////////////////////////////////////////////////////////////////////////////////// #include "cutlass/cutlass.h" #include "cutlass/library/library.h" #include "cutlass/library/manifest.h" #include "library_internal.h" """ self.instance_template = """ ${stub_begin} ${operation_instance} // Derived class struct ${operation_name} : public ${operation_name}_base { }; ${stub_end} /////////////////////////////////////////////////////////////////////////////////////////////////// """ self.configuration_header = """ namespace cutlass { namespace library { // Initialize all instances void initialize_${configuration_name}(Manifest &manifest) { """ self.configuration_instance = """${stub_begin} using Operation_${operation_name} = cutlass::conv::device::${kernel_name}< ${operation_name}>; manifest.append(new cutlass::library::${operation_wrapper}< Operation_${operation_name} >( "${operation_name}" )); ${stub_end} """ self.configuration_epilogue = "}\n" self.epilogue_template = """ /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// """ def operation_is_3x(self, operation): """Whether operation is a CUTLASS 3 convolution (as opposed to CUTLASS 2)""" return hasattr(operation, 'is_3x') and operation.is_3x def __enter__(self): """ Open the configuration_file, and write the "header" C++ code to it. The "header" consists of a comment (that this is generated code, so it should not be edited), and includes that are common to both the CUTLASS 2 and the CUTLASS 3 cases. """ _LOGGER.debug('*** EmitConv3dConfigurationLibrary::__enter__') _LOGGER.debug('*** configuration_path (file to write): ' + str(self.configuration_path)) _LOGGER.debug('*** configuration_name: ' + self.configuration_name) self.configuration_file = open(self.configuration_path, "w") self.configuration_file.write(SubstituteTemplate(self.header_template, { 'configuration_name': self.configuration_name })) self.operations = [] return self def emit(self, operation): """ Write three pieces of C++ code to the configuration_file (that was opened by the __enter__ method above): 1. the header includes that are specific to the operation (CUTLASS 2 vs. CUTLASS 3); 2. the "operation instance" (a "using" declaration ending in "_base"); and 3. the "operation name" (declaration and definition of a derived class of the above operation instance). The "using" declaration turns a C++ class name, possibly namespace-qualified, possibly also with angle brackets, into a C-style, easily demangled identifier. """ _LOGGER.debug('*** EmitConv3dConfigurationLibrary::emit') _LOGGER.debug('*** operation.procedural_name(): ' + operation.procedural_name()) self.operations.append(operation) self.configuration_file.write(self.includes_emitter.emit(operation)) stub_begin = '' stub_end = '' # It can be useful to stub (comment) out instantiations for testing. # In this case, one need only set is_stub to True. 
is_stub = False if is_stub: stub_begin = "// STUB for now\n#if 0" stub_end = '#endif // 0' self.configuration_file.write(Template(self.instance_template).substitute({ 'configuration_name': self.configuration_name, 'operation_name': operation.procedural_name(), 'operation_instance': self.instance_emitter.emit(operation), 'stub_begin': stub_begin, 'stub_end': stub_end })) def __exit__(self, exception_type, exception_value, traceback): """ Write the rest of the C++ code to the configuration_file, and close the file. The "rest of the C++ code" has the following components. 1. Configuration header: Open the namespace(s), and open the definition of the "initialize_${configuration_name}" registration function that registers the operation with the Manifest. ("Registration" helps turn C++ compile-time polymorphism (via template parameters) into a run-time choice of parameters.) 2. Configuration instance: In the body of the registration function, make a "using" declaration Operation_${operation_name} for the operation type (which uses operation_name as its template argument). Then, tell the manifest about the operation via a "manifest.append" call. The argument of the call is a new instance of "SomethingOperation<Operation_${operation_name}>" (replace Something with a specific name). 3. Configuration epilogue: Close the definition of the registration function. 4. Epilogue template: Close the namespace(s). """ _LOGGER.debug('*** EmitConv3dConfigurationLibrary::__exit__') _LOGGER.debug('*** configuration_path (file to write): ' + str(self.configuration_path)) _LOGGER.debug('*** configuration_name: ' + self.configuration_name) self.configuration_file.write(SubstituteTemplate(self.configuration_header, { 'configuration_name': self.configuration_name })) for operation in self.operations: stub_begin = '' stub_end = '' # It can be useful to stub (comment) out instantiations for testing. # In this case, one need only set is_stub to True. is_stub = False if is_stub: stub_begin = "// STUB for now\n#if 0" stub_end = "#endif // 0" kernel_name = 'ImplicitGemmConvolution' operation_wrapper = 'Conv3dOperation' if self.operation_is_3x(operation): kernel_name = 'ConvUniversalAdapter' operation_wrapper = 'ConvOperation3x' self.configuration_file.write(SubstituteTemplate(self.configuration_instance, { 'configuration_name': self.configuration_name, 'operation_name': operation.procedural_name(), 'kernel_name': kernel_name, 'operation_wrapper': operation_wrapper, 'stub_begin': stub_begin, 'stub_end': stub_end })) self.configuration_file.write(self.configuration_epilogue) self.configuration_file.write(self.epilogue_template) self.configuration_file.close() ################################################################################################### ###################################################################################################
python/cutlass_library/conv3d_operation.py/0
{ "file_path": "python/cutlass_library/conv3d_operation.py", "repo_id": "python", "token_count": 6540 }
42
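To make the flow in conv3d_operation.py above concrete, here is a minimal, hypothetical driver sketch for its emitters. The names `out_dir` and `ops` (a list of already-constructed Conv3dOperation objects sharing one configuration name) are assumptions for illustration; the real generator wires this up through cutlass_library's manifest machinery rather than a standalone helper.

import os

def emit_conv3d_configuration(out_dir, ops):
    # All operations in `ops` are assumed to share the same configuration_name().
    configuration_name = ops[0].configuration_name()
    os.makedirs(out_dir, exist_ok=True)
    # EmitConv3dConfigurationLibrary opens "<out_dir>/<configuration_name>.cu" on __enter__,
    # writes one kernel instance per emit() call, and emits the registration function on __exit__.
    with EmitConv3dConfigurationLibrary(out_dir, configuration_name) as emitter:
        for op in ops:
            emitter.emit(op)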
Emitters ======== Common ------ .. automodule:: cutlass.emit.common :members: :undoc-members: :show-inheritance: PyTorch ------- .. automodule:: cutlass.emit.pytorch :members: :undoc-members: :show-inheritance:
python/docs_src/source/cutlass.emit.rst/0
{ "file_path": "python/docs_src/source/cutlass.emit.rst", "repo_id": "python", "token_count": 98 }
43
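The two modules indexed above are usually exercised through a compiled plan. A rough usage sketch of the PyTorch emitter follows; the keyword names (name, cc, sourcedir, jit) and the shape of the returned module are assumptions based on the package's published examples and may differ between releases, so treat this purely as an illustration.

import torch
import cutlass

# Hypothetical sketch: build a GEMM plan, then emit a PyTorch extension wrapping it.
plan = cutlass.op.Gemm(element=torch.float16, layout=cutlass.LayoutType.RowMajor,
                       element_accumulator=torch.float32)
op = plan.construct()
mod = cutlass.emit.pytorch(op, name="cutlass_gemm", cc=plan.cc, sourcedir="out", jit=True)
# With jit=True, `mod` is the freshly built extension; its entry point (e.g. mod.run(A, B))
# depends on the generated code.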
################################################################################ # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################ """ Testbed classes of EVT """ import torch import unittest import cutlass from cutlass import Tensor import cutlass.backend.evt from cutlass.shape import GemmCoord from cutlass.utils.datatypes import torch_type from cutlass.utils.profiler import CUDAEventProfiler class EVTReferenceModule: def __init__(self, layout_A, layout_B, layout_C, epilogue_visitor): self.layout_A = layout_A self.layout_B = layout_B self.layout_C = layout_C self.epilogue_visitor = epilogue_visitor def run(self, A, B, C, problem_size, alpha, beta, batch=1): if self.layout_A == cutlass.LayoutType.RowMajor: A_row = A.view((batch, problem_size.m, problem_size.k)) else: A_col = A.view((batch, problem_size.k, problem_size.m)) A_row = torch.permute(A_col, (0, 2, 1)) if self.layout_B == cutlass.LayoutType.RowMajor: B_row = B.view((batch, problem_size.k, problem_size.n)) else: B_col = B.view((batch, problem_size.n, problem_size.k)) B_row = torch.permute(B_col, (0, 2, 1)) if self.layout_C == cutlass.LayoutType.RowMajor: C_row = C.view((batch, problem_size.m, problem_size.n)) else: C_col = C.view((batch, problem_size.n, problem_size.m)) C_row = torch.permute(C_col, (0, 2, 1)) out_row = torch.matmul(A_row, B_row) * alpha + C_row * beta if self.layout_C == cutlass.LayoutType.ColumnMajor: out = torch.permute(out_row, (0, 2, 1)) else: out = out_row return torch.flatten(out) def __call__(self, A, B, C, problem_size, batch=1, epilogue_args=None): # Running the mainloop accum = self.run( A, B, C, problem_size, 1.0, 0.0, batch=batch ).reshape(batch, problem_size.m, problem_size.n) # Running the epilogue epilogue_args["accum"] = accum references = self.epilogue_visitor(**epilogue_args) # Return the results if not isinstance(references, tuple): references = (references,) return references class EVTTestBed:
""" Epilogue Visitor Testbed """ def __init__(self, element, evt_fn, example_inputs, profile=False, **kwargs) -> None: self.element = element layout = cutlass.LayoutType.RowMajor self.example_inputs = example_inputs # Create the Gemm plan self.plan = cutlass.op.Gemm(element=element, layout=layout, element_accumulator=torch.float32) if "tile_description" in kwargs: self.plan.tile_description = kwargs["tile_description"] if "swizzling_functor" in kwargs: self.plan.swizzling_functor = kwargs["swizzling_functor"] # Compile the epilogue visitor epilogue_visitor = cutlass.epilogue.trace(evt_fn, example_inputs) if "epilogue_stages" in kwargs: epilogue_visitor.epilogue_stages = kwargs["epilogue_stages"] self.plan.epilogue_visitor = epilogue_visitor # Reference model self.reference_fn = EVTReferenceModule(layout, layout, layout, epilogue_visitor) self.profile = profile def get_torch_tensor(self, shape, dtype=None, fill=None): if dtype is None: dtype = self.element dtype = torch_type(dtype) if fill is None: return torch.ceil( torch.empty(size=shape, dtype=dtype, device="cuda").uniform_(-4.5, 3.5) ) else: return torch.full(shape, fill, dtype=dtype, device="cuda") def verify(self, problem_size, input_keys, result_keys, batch_count=1): """ Verify the results """ problem_size = GemmCoord(*problem_size) # Initiate the GEMM arguments tensor_A = self.get_torch_tensor((batch_count, problem_size.m, problem_size.k)) tensor_B = self.get_torch_tensor((batch_count, problem_size.k, problem_size.n)) # Initialize the epilogue args epilogue_args = {} for key in self.example_inputs.keys(): if key in input_keys: tensor = self.example_inputs[key] if isinstance(tensor, Tensor): epilogue_args[key] = self.get_torch_tensor(tensor.shape, tensor.element) else: epilogue_args[key] = tensor elif key in result_keys: tensor = self.example_inputs[key] if isinstance(tensor, Tensor): if "max" in key: fill = -1000 else: fill = 0 epilogue_args[key] = self.get_torch_tensor(tensor.shape, tensor.element, fill=fill) else: epilogue_args[key] = tensor tensor_D = epilogue_args["D"] if "C" in epilogue_args: tensor_C = epilogue_args["C"] else: tensor_C = tensor_D # Run the device kernel self.plan.run(tensor_A, tensor_B, tensor_C, tensor_D, visitor_args=epilogue_args) # Run the host reference evt_args_inputs = {} for key in input_keys: evt_args_inputs[key] = epilogue_args[key] reference_results = self.reference_fn( tensor_A, tensor_B, tensor_C, problem_size, batch_count, evt_args_inputs) # Compare the results for result, ref in zip(result_keys, reference_results): assert torch.equal(epilogue_args[result].flatten(), ref.flatten()) # Run profile if self.profile: profiler = CUDAEventProfiler( self.plan, 100, 100, tensor_A, tensor_B, tensor_C, tensor_D, visitor_args = epilogue_args ) print(f"Cutlass Python Duration: {profiler()}") class EVTTestCaseBase(unittest.TestCase): """ Base class for EVT Unittest """ def __init__(self, methodName: str = "runTest", lmnk=(6, 512, 256, 128)) -> None: super().__init__(methodName) self.element = cutlass.DataType.f16 self.l, self.m, self.n, self.k = lmnk self.problem_size = (self.m, self.n, self.k) torch.random.manual_seed(42) def fake_tensor(self, element, shape): return Tensor(element=element, shape=shape, layout_tag=cutlass.LayoutType.RowMajor) def get_problem_sizes(self, alignment, k=None, batch_count=[3,]): k = k if k else self.k problem_size_m = [alignment, 512 - 3 * alignment] problem_size_n = [alignment, 512 - alignment] if alignment % 8 == 0: problem_size_m.append(768) problem_size_n.append(768) 
problem_size_l = batch_count problem_sizes = [] for m in problem_size_m: for n in problem_size_n: for l in problem_size_l: problem_sizes.append((m, n, k, l)) return problem_sizes
test/python/cutlass/evt/utils/evt_testbed.py/0
{ "file_path": "test/python/cutlass/evt/utils/evt_testbed.py", "repo_id": "test", "token_count": 4021 }
44
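A minimal sketch of how the testbed above is driven by the EVT unit tests; the epilogue function, data type, and problem shape here are illustrative choices, not values taken from the file, and a CUDA device plus the cutlass Python package are assumed.

import cutlass
from cutlass import Tensor

def evt_linear(accum, C, alpha, beta):
    D = alpha * accum + beta * C
    return D

m, n, k, batch = 512, 256, 128, 3
fp16 = cutlass.DataType.f16
row = cutlass.LayoutType.RowMajor
example_inputs = {
    "accum": Tensor(element=fp16, shape=(batch, m, n), layout_tag=row),
    "C": Tensor(element=fp16, shape=(batch, m, n), layout_tag=row),
    "alpha": 1.0,
    "beta": 0.5,
    "D": Tensor(element=fp16, shape=(batch, m, n), layout_tag=row),
}

testbed = EVTTestBed(fp16, evt_linear, example_inputs)
# input_keys feed the traced epilogue; result_keys are checked against the host reference.
testbed.verify((m, n, k), input_keys=["C", "alpha", "beta"], result_keys=["D"], batch_count=batch)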
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Test the EVT interface """ import numpy as np import unittest import cutlass from cutlass import LayoutType, Tensor from cutlass.backend.utils.device import device_cc from cutlass.epilogue import reshape, permute from utils import ExpectException @unittest.skipIf(device_cc() not in [80, 90], "This unittest is for Sm80 and Sm90 only") class EVTErrorTests(unittest.TestCase): """ Tests various error scenarios that arise with the EVT interface """ @unittest.skipIf(device_cc() != 90, "Only Sm90 EVT requires root node be 'D'") def test_root_not_d(self): """ Test when "D" does not exist in Sm90 EVT """ def evt_root_not_d(accum, alpha): F = accum * alpha return F example_tensors = { "accum": self.fake_tensor(np.float16, (6, 512, 512)), "alpha": 1.2, "F": self.fake_tensor(np.float16, (6, 512, 512)) } with ExpectException(device_cc() == 90, "SyntaxError: Sm90 EVT requires the epilogue to have a returned tensor D, " "but the variable 'D' is not found in the return values.", True): cutlass.epilogue.trace(evt_root_not_d, example_tensors) def test_no_accum(self): """ Test when "accum" is not in input arguments """ def evt_no_accum(alpha, C): D = alpha * C return D example_tensors = { "C": self.fake_tensor(np.float16, (6, 512, 512)), "alpha": 1.2, "D": self.fake_tensor(np.float16, (6, 512, 512)) } with ExpectException(True, "SyntaxError: Cannot find 'accum' in the argument list.", True): cutlass.epilogue.trace(evt_no_accum, example_tensors) @unittest.skipIf(device_cc() != 90, "Only Sm90 EVT has concern on smem size") def test_too_much_shared_memory(self): """ Test when the epilogue consumes too much shared memory """ def evt_too_much_shared_memory(accum, C1, C2, C3, C4, C5, C6, C7, C8): D1 = accum + C1 D2 = D1 + C2 D3 = D2 + C3 D4 = D3 + 
C4 D5 = D4 + C5 D6 = D5 + C6 D7 = D6 + C7 D = D7 + C8 return D, D1, D2, D3, D4, D5, D6, D7 example_tensors = { "accum": self.fake_tensor(np.float16, (6, 512, 512)), "C1": self.fake_tensor(np.float16, (6, 512, 512)), "C2": self.fake_tensor(np.float16, (6, 512, 512)), "C3": self.fake_tensor(np.float16, (6, 512, 512)), "C4": self.fake_tensor(np.float16, (6, 512, 512)), "C5": self.fake_tensor(np.float16, (6, 512, 512)), "C6": self.fake_tensor(np.float16, (6, 512, 512)), "C7": self.fake_tensor(np.float16, (6, 512, 512)), "C8": self.fake_tensor(np.float16, (6, 512, 512)), "D1": self.fake_tensor(np.float16, (6, 512, 512)), "D2": self.fake_tensor(np.float16, (6, 512, 512)), "D3": self.fake_tensor(np.float16, (6, 512, 512)), "D4": self.fake_tensor(np.float16, (6, 512, 512)), "D5": self.fake_tensor(np.float16, (6, 512, 512)), "D6": self.fake_tensor(np.float16, (6, 512, 512)), "D7": self.fake_tensor(np.float16, (6, 512, 512)), "D": self.fake_tensor(np.float16, (6, 512, 512)) } epilogue_visitor = cutlass.epilogue.trace(evt_too_much_shared_memory, example_tensors) plan = cutlass.op.Gemm( element=np.float16, layout=cutlass.LayoutType.RowMajor, element_accumulator=np.float32 ) with ExpectException(True, "RuntimeError: The epilogue consumes too much shared memory. " "No valid tile description is found in the generator.", True): plan.epilogue_visitor = epilogue_visitor def test_not_ssa(self): """ Test when the epilogue is not in SSA """ def evt_redefine(accum, C, alpha): F = accum + C F = F * alpha D = F return D, F example_tensors = { "accum": self.fake_tensor(np.float16, (6, 512, 512)), "C": self.fake_tensor(np.float16, (6, 512, 512)), "alpha": 1.5, "D": self.fake_tensor(np.float16, (6, 512, 512)), "F": self.fake_tensor(np.float16, (6, 512, 512)) } with ExpectException(True, "SyntaxError: Variable 'F' cannot be defined twice.", True): cutlass.epilogue.trace(evt_redefine, example_tensors) def evt_undefine(accum, alpha): F = accum + C D = F * alpha return D, F example_tensors = { "accum": self.fake_tensor(np.float16, (6, 512, 512)), "alpha": 1.5, "D": self.fake_tensor(np.float16, (6, 512, 512)), "F": self.fake_tensor(np.float16, (6, 512, 512)) } with ExpectException(True, "SyntaxError: Variable 'C' is undefined.", True): cutlass.epilogue.trace(evt_undefine, example_tensors) def test_missing_example_tensor(self): """ Test when the example tensor of an input/output variable is not provided """ def evt_missing_example_tensor(accum, C): D = accum + C return D example_tensors = { "accum": self.fake_tensor(np.float16, (6, 512, 512)), "C": self.fake_tensor(np.float16, (6, 512, 512)), } with ExpectException(True, "RuntimeError: Example input for D is not provided.", True): cutlass.epilogue.trace(evt_missing_example_tensor, example_tensors) example_tensors = { "accum": self.fake_tensor(np.float16, (6, 512, 512)), "D": self.fake_tensor(np.float16, (6, 512, 512)), } with ExpectException(True, "RuntimeError: Example input for C is not provided.", True): cutlass.epilogue.trace(evt_missing_example_tensor, example_tensors) def test_return_expression(self): """ Test when the return value is an expression """ def evt_return_expr(accum, C): return accum + C example_tensors = { "accum": self.fake_tensor(np.float16, (6, 512, 512)), "C": self.fake_tensor(np.float16, (6, 512, 512)), } with ExpectException(True, "SyntaxError: Return value cannot be an expression", True): cutlass.epilogue.trace(evt_return_expr, example_tensors) def test_incompatible_shape(self): """ Test when the shape of example tensors are incompatible """ def 
evt_incompatible_shape(accum, C): D = accum + C return D example_tensors = { "accum": self.fake_tensor(np.float16, (6, 256, 512)), "C": self.fake_tensor(np.float16, (6, 512, 512)), "D": self.fake_tensor(np.float16, (6, 512, 512)) } with ExpectException(True, "RuntimeError: Dimension mismatch between accum(6, 256, 512), C(6, 512, 512).", True): cutlass.epilogue.trace(evt_incompatible_shape, example_tensors) def test_no_matching_impl(self): def evt_no_matching_impl(accum, bias): D = accum + reshape(permute(bias, indices=(1, 0)), new_shape=(512, 1)) return D example_tensors = { "accum": self.fake_tensor(np.float16, (6, 512, 256)), "bias": self.fake_tensor(np.float16, (16, 32)), "D": self.fake_tensor(np.float16, (6, 512, 256)) } with ExpectException(True, "NotImplementedError: No matching op for node bias with stride (0, (1, 32), 0).", True): cutlass.epilogue.trace(evt_no_matching_impl, example_tensors) # # Helper functions # def fake_tensor(self, element, shape): return Tensor(element=element, shape=shape, layout_tag=LayoutType.RowMajor) if __name__ == '__main__': unittest.main()
test/python/cutlass/interface/evt_interface.py/0
{ "file_path": "test/python/cutlass/interface/evt_interface.py", "repo_id": "test", "token_count": 4683 }
45
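For contrast with the failure cases above, a sketch of a trace that satisfies the checked rules: it consumes 'accum', returns a tensor named 'D', stays in single-assignment form, and supplies an example tensor for every input and output. Shapes and dtypes are illustrative.

import numpy as np
import cutlass
from cutlass import LayoutType, Tensor

def evt_well_formed(accum, C, alpha):
    F = alpha * accum          # each name is defined exactly once (SSA)
    D = F + C                  # root output is named 'D', as Sm90 requires
    return D

example_tensors = {
    "accum": Tensor(element=np.float16, shape=(6, 512, 512), layout_tag=LayoutType.RowMajor),
    "C": Tensor(element=np.float16, shape=(6, 512, 512), layout_tag=LayoutType.RowMajor),
    "alpha": 1.5,
    "D": Tensor(element=np.float16, shape=(6, 512, 512), layout_tag=LayoutType.RowMajor),
}

# This trace should pass the checks exercised by the tests above.
epilogue_visitor = cutlass.epilogue.trace(evt_well_formed, example_tensors)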
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Implicit GEMM testbed sizes for Conv2d problem */ #pragma once #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/aligned_buffer.h" #include "cutlass/numeric_types.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/conv3d_problem_size.h" namespace test { namespace conv { namespace device { using Conv3dProblemVector = std::vector<cutlass::conv::Conv3dProblemSize>; //////////////////////////////////////////////////////////////////////////// /// Structure TestbedConv3dProblemSizes initializes and holds conv default and /// important network sizes //////////////////////////////////////////////////////////////////////////// struct TestbedConv3dProblemSizes { // // Data members // int minimum_channel_size; Conv3dProblemVector conv3d_default_sizes; Conv3dProblemVector conv3d_vnet_medical_sizes; // // Methods // /// Default ctor TestbedConv3dProblemSizes(int minimum_channel_size_ = 64): minimum_channel_size (minimum_channel_size_) { initialize_conv3d_default_sizes(); initialize_conv3d_vnet_medical_sizes(conv3d_vnet_medical_sizes, 1 /*batch-size*/); filter_all(); } /// Eliminates some illegal cases void filter_all() { Conv3dProblemVector *problems_vectors[] = { &conv3d_default_sizes, &conv3d_vnet_medical_sizes }; for (Conv3dProblemVector *problems : problems_vectors) { Conv3dProblemVector filtered; for (cutlass::conv::Conv3dProblemSize const & problem : *problems) { if (!(problem.C % minimum_channel_size)) { filtered.push_back(problem); } } *problems = filtered; } } // Add a few standard convolution problem sizes void initialize_conv3d_default_sizes() { conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 1, 3, 3, minimum_channel_size}, // input size (NDHWC) {8, 1, 1, 1, minimum_channel_size}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 1, 1, 8, minimum_channel_size}, // input size (NDHWC) {8, 1, 1, 3, minimum_channel_size}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 1, 1, 8, minimum_channel_size}, // input size (NDHWC) {8, 1, 1, 3, minimum_channel_size}, // filter size (KTRSC) CUTLASS_STL_NAMESPACE::make_tuple( cutlass::Coord<3>({1, 1, 1}), // near padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({0, 0, 0}) // far padding (pad_d, pad_h, pad_w) ), cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 8, 8, 8, minimum_channel_size}, // input size (NDHWC) {8, 3, 3, 3, minimum_channel_size}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // 
dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 8, 8, 8, minimum_channel_size}, // input size (NDHWC) {8, 3, 3, 3, minimum_channel_size}, // filter size (KTRSC) CUTLASS_STL_NAMESPACE::make_tuple( cutlass::Coord<3>({1, 1, 1}), // near padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({0, 0, 0}) // far padding (pad_d, pad_h, pad_w) ), cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 16, 16, 16, minimum_channel_size}, // input size (NDHWC) {8, 3, 3, 3, minimum_channel_size}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 1, 15, 19, 160}, // input size (NDHWC) {224, 1, 3, 6, 160}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 2, 1, 1, minimum_channel_size}, // input size (NDHWC) {8, 2, 1, 1, minimum_channel_size}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 1, 7, 7, minimum_channel_size}, // input size (NDHWC) {16, 1, 3, 3, minimum_channel_size}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_default_sizes.push_back(cutlass::conv::Conv3dProblemSize( {1, 11, 15, 19, 64}, // input size (NDHWC) {32, 4, 3, 6, 64}, // filter size (KTRSC) cutlass::Coord<3>({2, 1, 3}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); } // Add vnet layers to unit testing sizes void initialize_conv3d_vnet_medical_sizes(Conv3dProblemVector &conv3d_problem_vector, int batch_size = 1) { conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 32, 32, 32, 16}, // input size (NDHWC) {32, 2, 2, 2, 16}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 16, 16, 16, 32}, // input size (NDHWC) {32, 3, 3, 3, 32}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 16, 16, 16, 32}, // input size (NDHWC) {64, 2, 2, 2, 32}, // filter size (KTRSC) 
cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 8, 8, 8, 64}, // input size (NDHWC) {64, 3, 3, 3, 64}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 8, 8, 8, 64}, // input size (NDHWC) {128, 2, 2, 2, 64}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 4, 4, 4, 128}, // input size (NDHWC) {128, 3, 3, 3, 128}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 8, 8, 8, 128}, // input size (NDHWC) {128, 3, 3, 3, 128}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 16, 16, 16, 64}, // input size (NDHWC) {64, 3, 3, 3, 64}, // filter size (KTRSC) cutlass::Coord<3>({1, 1, 1}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({1, 1, 1}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 32, 32, 32, 16}, // input size (NDHWC) {64, 2, 2, 2, 16}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); conv3d_problem_vector.push_back(cutlass::conv::Conv3dProblemSize( {batch_size, 16, 16, 16, 32}, // input size (NDHWC) {128, 2, 2, 2, 32}, // filter size (KTRSC) cutlass::Coord<3>({0, 0, 0}), // padding (pad_d, pad_h, pad_w) cutlass::Coord<3>({2, 2, 2}), // stride (stride_d, stride_h, stride_w) cutlass::Coord<3>({1, 1, 1}) // dilation (dilation_d, dilation_h, dilation_w) )); } }; } // namespace device } // namespace conv } // namespace test
test/unit/conv/device/conv3d_problems.h/0
{ "file_path": "test/unit/conv/device/conv3d_problems.h", "repo_id": "test", "token_count": 6131 }
46
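A sketch of how a test might walk the two size lists assembled above. The functor `run_one` stands in for a real device-side testbed invocation and is an assumed callable, not something this header defines; the header itself (and its includes) is assumed to be on the include path.

// Illustrative only: iterate every default and V-Net problem size.
template <typename Func>
bool ForEachConv3dProblem(Func &&run_one, int minimum_channel_size = 64) {
  test::conv::device::TestbedConv3dProblemSizes sizes(minimum_channel_size);

  // Both vectors were already filtered so that C is a multiple of minimum_channel_size.
  for (auto const *problems : {&sizes.conv3d_default_sizes, &sizes.conv3d_vnet_medical_sizes}) {
    for (cutlass::conv::Conv3dProblemSize const &problem : *problems) {
      if (!run_one(problem)) {
        return false;   // stop at the first failing problem size
      }
    }
  }
  return true;
}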
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types and is safe to use in a union. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/array.h" #include "cutlass/core_io.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void convert_bf16_f32(cutlass::bfloat16_t *output, float const *input, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N) { output[tid] = static_cast<cutlass::bfloat16_t>(input[tid]); } } __global__ void convert_and_pack_bf16(cutlass::bfloat16_t *output, float const *input, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid * 2 < N) { cutlass::NumericArrayConverter<cutlass::bfloat16_t, float, 2> convert; cutlass::Array<cutlass::bfloat16_t, 2> *dst_ptr = reinterpret_cast<cutlass::Array<cutlass::bfloat16_t, 2> *>(output + tid * 2); cutlass::Array<float, 2> const *src_ptr = reinterpret_cast<cutlass::Array<float, 2> const *>(input + tid * 2); *dst_ptr = convert(*src_ptr); } } TEST(bfloat16_t, device_conversion) { using T = cutlass::bfloat16_t; using S = float; int const N = 256; cutlass::HostTensor<T, cutlass::layout::RowMajor> destination({N, 1}); cutlass::HostTensor<S, cutlass::layout::RowMajor> source({N, 1}); for (int i = 0; i < N; ++i) { source.at({i, 0}) = float(i - 128); destination.at({i, 0}) = T(0); } source.sync_device(); destination.sync_device(); convert_bf16_f32<<< dim3(1,1), dim3(N, 1) >>>(destination.device_data(), source.device_data(), N); ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error."; destination.sync_host(); int errors = 0; for (int i = 0; i < N; ++i) { T got = destination.at({i, 0}); S expected = source.at({i, 0}); if (S(got) != expected) { ++errors; if (errors < 10) { std::cerr << "Basic conversion error - [" << i << "] - got " << got << ", expected " << expected << "\n"; } } destination.at({i, 0}) = T(0); } destination.sync_device(); convert_and_pack_bf16<<< dim3(1,1), dim3(N, 1) >>>(destination.device_data(), source.device_data(), N); ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error."; destination.sync_host(); for (int i = 0; i < N; ++i) { T got = destination.at({i, 0}); S expected = source.at({i, 0}); if (S(got) != expected) { ++errors; if (errors < 10) { std::cerr << "Convert and pack error - [" << i << "] - got " << got << ", expected " << expected << "\n"; } } } EXPECT_EQ(errors, 0); } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Host // ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(bfloat16_t, host_conversion) { for (int i = -128; i < 128; ++i) { float f = static_cast<float>(i); cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i); cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(f); EXPECT_TRUE(static_cast<int>(x) == i); EXPECT_TRUE(static_cast<float>(y) == f); } // Try out default-ctor (zero initialization of primitive proxy type) EXPECT_TRUE(cutlass::bfloat16_t() == 0.0_bf16); // Try out user-defined literals EXPECT_TRUE(cutlass::bfloat16_t(7) == 7_bf16); EXPECT_TRUE(7 == static_cast<int>(7_bf16)); } TEST(bfloat16_t, host_arithmetic) { for (int i = -100; i < 100; ++i) { for (int j = -100; j < 100; ++j) { cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i); cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(j); EXPECT_TRUE(static_cast<int>(x + y) == (i + j)); 
} } } TEST(bfloat16_t, host_round) { struct { uint32_t f32_bits; uint16_t expected; } tests[] = { {0x40040000, 0x4004}, // M=0, R=0, S=0 => rtz {0x40048000, 0x4004}, // M=0, R=1, S=0 => rtz {0x40040001, 0x4004}, // M=0, R=0, S=1 => rtz {0x4004c000, 0x4005}, // M=0, R=1, S=1 => +inf {0x4004a000, 0x4005}, // M=0, R=1, S=1 => +inf {0x40050000, 0x4005}, // M=1, R=0, S=0 => rtz {0x40054000, 0x4005}, // M=1, R=0, S=1 => rtz {0x40058000, 0x4006}, // M=1, R=1, S=0 => +inf {0x40058001, 0x4006}, // M=1, R=1, S=1 => +inf {0x7f800000, 0x7f80}, // +inf {0xff800000, 0xff80}, // -inf {0x7fffffff, 0x7fff}, // canonical NaN {0x7ff00001, 0x7fff}, // NaN -> canonical NaN {0xfff00010, 0x7fff}, // NaN -> canonical NaN {0, 0} }; bool running = true; for (int i = 0; running; ++i) { float f32 = reinterpret_cast<float const &>(tests[i].f32_bits); cutlass::bfloat16_t bf16 = cutlass::bfloat16_t(f32); bool passed = (tests[i].expected == bf16.raw()); EXPECT_TRUE(passed) << "Error - convert(f32: 0x" << std::hex << tests[i].f32_bits << ") -> 0x" << std::hex << tests[i].expected << "\ngot: 0x" << std::hex << bf16.raw(); if (!tests[i].f32_bits) { running = false; } } } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Device // /////////////////////////////////////////////////////////////////////////////////////////////////
test/unit/core/bfloat16.cu/0
{ "file_path": "test/unit/core/bfloat16.cu", "repo_id": "test", "token_count": 2746 }
47
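The host_round table above encodes IEEE round-to-nearest-even on the 16 mantissa bits dropped when narrowing float to bfloat16, plus NaN canonicalization. A stand-alone sketch of that bit manipulation (my own illustration, not the library's implementation) reproduces every row of the table:

#include <cstdint>
#include <cstring>

// Sketch: float -> bf16 bit pattern with round-to-nearest-even; NaNs map to 0x7fff.
uint16_t float_to_bf16_rne(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  if ((bits & 0x7f800000u) == 0x7f800000u && (bits & 0x007fffffu) != 0u) {
    return 0x7fff;                               // NaN -> canonical NaN
  }
  uint32_t lsb    = (bits >> 16) & 1u;           // M: lowest kept mantissa bit
  uint32_t round  = (bits >> 15) & 1u;           // R: first dropped bit
  uint32_t sticky = (bits & 0x7fffu) != 0u;      // S: OR of the remaining dropped bits
  uint16_t hi = static_cast<uint16_t>(bits >> 16);
  if (round && (sticky || lsb)) {                // round up unless it is an exact tie to even
    ++hi;
  }
  return hi;                                     // e.g. 0x40058000 -> 0x4006, 0x40048000 -> 0x4004
}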
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for basic uint128 functionality */ #include "../common/cutlass_unit_test.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// // // Host // ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(uint128_t, host_arithmetic) { using T = cutlass::uint128_t; // only low 64bit for (uint64_t i = 0; i < 1024; ++i) { for (uint64_t j = 0; j < 1024; ++j) { T x = i; T y = j; EXPECT_TRUE(static_cast<uint64_t>(x + y) == (i + j)); } } // carry overflow for low uint64_t { for (uint64_t i = 0; i < 1024; ++i) { T x = static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF); T y = i + 1; T z = x + y; EXPECT_EQ(z.hilo_.hi, static_cast<uint64_t>(0x1)); EXPECT_EQ(z.hilo_.lo, i); } } } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Device // ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void uint128_add_operator(cutlass::uint128_t *output, cutlass::uint128_t const *input, cutlass::uint128_t base, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N) { output[tid] = input[tid] + base; } } TEST(uint128_t, device_arithmetic) { using T = cutlass::uint128_t; int const N = 1024; cutlass::HostTensor<T, cutlass::layout::RowMajor> input({N, 1}); cutlass::HostTensor<T, cutlass::layout::RowMajor> sum({N, 1}); for (int i = 0; i < N; ++i) { input.at({i, 0}) = static_cast<uint64_t>(i + 1); } T b = static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF); input.sync_device(); uint128_add_operator<<< dim3(1,1), dim3(N, 1) >>>(sum.device_data(), input.device_data(), b, N); ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error."; sum.sync_host(); for (int i = 0; i < N; ++i) { T got = sum.at({i, 0}); uint64_t expected_hi = static_cast<uint64_t>(0x1); uint64_t expected_lo = static_cast<uint64_t>(i); EXPECT_EQ(got.hilo_.hi, expected_hi); EXPECT_EQ(got.hilo_.lo, expected_lo); } }
test/unit/core/uint128.cu/0
{ "file_path": "test/unit/core/uint128.cu", "repo_id": "test", "token_count": 1371 }
48
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass_unit_test.h" #include <cutlass/trace.h> #include <cute/tensor.hpp> template <class Layout, class CoTarget> void test_complement(Layout const& layout, CoTarget const& cotarget) { using namespace cute; auto result = complement(layout, cotarget); CUTLASS_TRACE_HOST("complement(" << layout << ", " << cotarget << ") => " << result); auto completed = make_layout(layout, result); // Lower-bound on the codomain size of the layout ++ complement (1) EXPECT_GE(cosize(completed), size(cotarget)); // Upper-bound on the codomain size of the complement (2) EXPECT_LE(cosize(result), cute::round_up(size(cotarget), cosize(layout))); // Post-condition on the codomain of the complement for (int i = 1; i < size(result); ++i) { EXPECT_LT(result(i-1), result(i)); // Ordered (3) for (int j = 0; j < size(layout); ++j) { EXPECT_NE(result(i), layout(j)); // Disjoint (4) } } // Other observations EXPECT_LE(size(result), cosize(result)); // As a result of the ordered condition (3) EXPECT_GE(size(result), size(cotarget) / size(filter(layout))); EXPECT_LE(cosize(completed), cosize(result) + cosize(layout)); EXPECT_GE(cosize(result), size(cotarget) / size(filter(layout))); if constexpr (is_static<decltype(stride(completed))>::value) { // If we can apply complement again EXPECT_EQ(size(complement(completed)), 1); // There's no more codomain left over } } template <class Layout> void test_complement(Layout const& layout) { return test_complement(layout, cosize(layout)); } TEST(CuTe_core, Complement) { using namespace cute; CUTLASS_TRACE_HOST("-------------------------------"); CUTLASS_TRACE_HOST("COMPLEMENT"); CUTLASS_TRACE_HOST("-------------------------------"); { auto layout = Layout<_1,_0>{}; test_complement(layout); 
test_complement(layout, Int<2>{}); test_complement(layout, Int<5>{}); test_complement(layout, make_shape(Int<2>{}, 2)); } { auto layout = Layout<_1,_1>{}; test_complement(layout); test_complement(layout, Int<2>{}); test_complement(layout, Int<5>{}); test_complement(layout, make_shape(Int<2>{}, 2)); } { auto layout = Layout<_1,_2>{}; test_complement(layout, Int<1>{}); test_complement(layout, Int<2>{}); test_complement(layout, Int<8>{}); test_complement(layout, Int<5>{}); test_complement(layout, make_shape(Int<2>{}, 2)); } { auto layout = Layout<_4,_0>{}; test_complement(layout, Int<1>{}); test_complement(layout, Int<2>{}); test_complement(layout, Int<8>{}); } { auto layout = Layout<_4,_1>{}; test_complement(layout, Int<1>{}); test_complement(layout, Int<2>{}); test_complement(layout, Int<8>{}); } { auto layout = Layout<_4,_2>{}; test_complement(layout, Int<1>{}); test_complement(layout); test_complement(layout, Int<16>{}); test_complement(layout, Int<19>{}); test_complement(layout, make_shape(Int<2>{}, 2)); } { auto layout = Layout<_4,_4>{}; test_complement(layout, Int<1>{}); test_complement(layout); test_complement(layout, Int<17>{}); test_complement(layout, make_shape(Int<2>{}, 2)); } { auto layout = Layout<Shape<_2,_4>>{}; test_complement(layout); } { auto layout = Layout<Shape<_2,_3>>{}; test_complement(layout); } { auto layout = Layout<Shape<_2,_4>, Stride<_1,_4>>{}; test_complement(layout); } { auto layout = Layout<Shape<_2,_4>, Stride<_1,_6>>{}; test_complement(layout); } { auto layout = Layout<Shape<_2,_4,_8>, Stride<_8,_1,_64>>{}; test_complement(layout); } { auto layout = Layout<Shape<_2,_4,_8>, Stride<_8,_1,_0>>{}; test_complement(layout); test_complement(layout, Int<460>{}); } { auto layout = make_layout(Shape <Shape <_2,_2>,Shape <_2, _2>>{}, Stride<Stride<_1,_4>,Stride<_8,_32>>{}); test_complement(layout); } { auto layout = make_layout(Shape <Shape <_2, _2>,Shape <_2,_2>>{}, Stride<Stride<_1,_32>,Stride<_8,_4>>{}); test_complement(layout); } // Fails due to non-injective layout // { // auto layout = make_layout(Shape <Shape <_2,_2>,Shape <_2,_2>>{}, // Stride<Stride<_1,_8>,Stride<_8,_4>>{}); // test_complement(layout); // } // Fails due to non-injective layout // { // auto layout = Layout<Shape<_2,_2>, Stride<_2,_3>>{}; // test_complement(layout); // test_complement(layout, Int<19>{}); // } { auto layout = Layout<Shape<_4,_6>, Stride<_1,_6>>{}; test_complement(layout); } { auto layout = Layout<Shape<_4,_2>, Stride<_1,_10>>{}; test_complement(layout); } { auto layout = Layout<Shape<_4,_2>, Stride<_1,_16>>{}; test_complement(layout); } CUTLASS_TRACE_HOST("-------------------------------"); CUTLASS_TRACE_HOST("Dynamic shapes/strides"); CUTLASS_TRACE_HOST("-------------------------------"); { auto layout = make_layout(12); test_complement(layout, 1); test_complement(layout); test_complement(layout, 53); test_complement(layout, 128); } { auto layout = make_layout(12, 1); test_complement(layout, 1); test_complement(layout); test_complement(layout, 53); test_complement(layout, 128); } { auto layout = make_layout(12, Int<2>{}); test_complement(layout, 1); test_complement(layout); test_complement(layout, 53); test_complement(layout, 128); } { auto layout = make_layout(12, 2); test_complement(layout, 1); test_complement(layout); test_complement(layout, 53); test_complement(layout, 128); } { auto layout = make_layout(make_shape(3,6),make_stride(_1{}, _3{})); test_complement(layout); } { auto layout = make_layout(make_shape(3,6),make_stride(_1{}, _9{})); test_complement(layout); } { auto 
layout = make_layout(make_shape(3,6),make_stride(_1{}, _10{})); test_complement(layout); } { auto layout = make_layout(make_shape(make_shape(2,2), make_shape(2,2)), Stride<Stride<_1,_4>,Stride<_8,_32>>{}); test_complement(layout); } { auto layout = make_layout(Int<64>{}); test_complement(layout, make_shape(Int<32>{}, Int<4>{}, Int<4>{})); test_complement(layout, make_shape(Int<32>{}, Int<4>{}, 4)); } }
test/unit/cute/core/complement.cpp/0
{ "file_path": "test/unit/cute/core/complement.cpp", "repo_id": "test", "token_count": 3069 }
49
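/////////////////////////////////////////////////////////////////////////////////////////////////
// Editorial sketch (not part of complement.cpp above): a minimal, standalone illustration of
// what cute::complement computes. For an injective layout A and a target size M,
// complement(A, M) is a layout B such that the concatenation (A, B) covers [0, M) exactly
// once. The expected result noted below is a worked example of ours, not an assertion copied
// from the test file.
#include <cute/tensor.hpp>

int main() {
  using namespace cute;
  auto a = Layout<_4, _2>{};          // shape 4, stride 2 -> image {0, 2, 4, 6}
  auto b = complement(a, Int<16>{});  // expected: (_2,_2):(_1,_8)
  print(b); print("\n");
  // The concatenation (a, b) should have size 16 and cosize 16, i.e. it enumerates
  // [0, 16) with no gaps and no repeats.
  auto ab = make_layout(a, b);
  print(size(ab)); print(" elements, cosize "); print(cosize(ab)); print("\n");
  return 0;
}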
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide Rank 2k update interface */ #pragma once #include <iostream> #include <fstream> #include <sstream> #include "../../common/cutlass_unit_test.h" #include "cutlass/blas3.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/distribution.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/reference/host/rank_2k.h" #include "cutlass/util/reference/host/rank_2k_complex.h" #include "testbed_utils.h" namespace test { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Rank2K> struct TestbedRank2KUniversal { using ElementA = typename Rank2K::ElementA; using ElementB = typename Rank2K::ElementB; using ElementC = typename Rank2K::ElementC; using ElementAccumulator = typename Rank2K::ElementAccumulator; using ElementCompute = typename Rank2K::Rank2Kkernel::Epilogue::OutputOp::ElementCompute; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint64_t seed; cutlass::HostTensor<typename Rank2K::ElementA, typename Rank2K::LayoutA> tensor_A; cutlass::HostTensor<typename Rank2K::ElementB, typename Rank2K::LayoutB> tensor_B; cutlass::HostTensor<typename Rank2K::ElementC, typename Rank2K::LayoutC> tensor_C; cutlass::HostTensor<typename Rank2K::ElementC, typename Rank2K::LayoutC> tensor_D; cutlass::HostTensor<typename Rank2K::ElementC, typename Rank2K::LayoutC> reference_D; // // Methods // TestbedRank2KUniversal( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed, int mantissa_in_bits) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Rank2K::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillRandomUniform( view, seed, scope_max, scope_min, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential( view.data(), view.capacity()); } else { EXPECT_TRUE(false) << "Input distribution not implemented"; return false; } return true; } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_symmetric_tensor( cutlass::TensorView<Element, Layout> view, 
cutlass::Distribution::Kind dist_kind, uint64_t seed, int mantissa_in_bits) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Rank2K::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillSymmetricRandomUniform( view, seed, Rank2K::kFillModeC, scope_max, scope_min, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillSymmetricRandomGaussian( view, seed, Rank2K::kFillModeC, 0, 0.5, mantissa_in_bits); } else { EXPECT_TRUE(false) << "Input distribution (symmetric tensor) not implemented"; return false; } return true; } /// Initializes data structures void initialize(cutlass::gemm::GemmCoord problem_size) { // // Allocate the Rank2K workspace // tensor_A.resize(problem_size.mk()); tensor_B.resize(problem_size.mk()); tensor_C.resize(problem_size.mn()); tensor_D.resize(problem_size.mn()); reference_D.resize(problem_size.mn(), false); EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019, cutlass::MantissaInBits<typename Rank2K::ElementA>::bits)); EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018, cutlass::MantissaInBits<typename Rank2K::ElementB>::bits)); EXPECT_TRUE(initialize_symmetric_tensor(tensor_C.host_view(), init_C, seed + 2017, cutlass::MantissaInBits<typename Rank2K::ElementC>::bits)); // It is possible to randomly initialize to all zeros, so override this with non-zeros // in the upper left corner of each operand. 
tensor_A.host_view().at({0, 0}) = typename Rank2K::ElementA(1); tensor_B.host_view().at({0, 0}) = typename Rank2K::ElementB(1); tensor_C.host_view().at({0, 0}) = typename Rank2K::ElementC(1); cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view()); tensor_A.sync_device(); tensor_B.sync_device(); tensor_C.sync_device(); tensor_D.sync_device(); } /// Compares computed reference with device reference and outputs to a file if incorrect bool compare_reference( cutlass::gemm::GemmCoord problem_size, ElementCompute alpha, ElementCompute beta) { tensor_D.sync_host(); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); if (tensor_D.size() > 1) EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); if (reference_D.size() > 1) EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); double l2_norm = cutlass::reference::host::TensorRelativeErrorMetric(reference_D.host_view(), tensor_D.host_view()); bool passed = l2_norm < cutlass::MantissaInBits<typename Rank2K::ElementA>::error; return passed; } /// Verifies the result is a Rank2K bool verify( cutlass::gemm::GemmCoord problem_size, ElementCompute alpha, ElementCompute beta) { // // Verify // cutlass::reference::host::Rank2KComplex< typename Rank2K::ElementA, typename Rank2K::LayoutA, typename Rank2K::ElementB, typename Rank2K::LayoutB, typename Rank2K::ElementC, typename Rank2K::LayoutC, ElementCompute, ElementAccumulator >( problem_size, alpha, tensor_A.host_ref(), Rank2K::kTransformA, tensor_B.host_ref(), Rank2K::kTransformB, beta, tensor_C.host_ref(), reference_D.host_ref(), ElementAccumulator(0), Rank2K::kFillModeC, Rank2K::kBlasMode ); return compare_reference(problem_size, alpha, beta); } /// Returns true if the CUDA device is sufficient to execute the kernel. bool sufficient() const { // // Determine SMEM requirements and waive if not satisfied // size_t smem_size = sizeof(typename Rank2K::Rank2Kkernel::SharedStorage); cudaDeviceProp properties; int device_idx; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDevice() API call failed."); } result = cudaGetDeviceProperties(&properties, device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProperties() failed"); } if (properties.sharedMemPerBlockOptin < smem_size) { return false; } return true; } /// Executes one test bool run( cutlass::gemm::GemmUniversalMode mode, cutlass::gemm::GemmCoord problem_size, int batch_count = 1, ElementCompute alpha = ElementCompute(1), ElementCompute beta = ElementCompute(0)) { // Waive test if insufficient CUDA device if (!sufficient()) { if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; } return true; } #if 0 std::cout << "[TestbedRank2KUniversal::run()] problem(m, n, k): " << problem_size << " alpha: " << ElementCompute(alpha) << " beta: " << ElementCompute(beta) << std::endl; #endif this->initialize(problem_size); // // Initialize the Rank2K operator // typename Rank2K::Arguments arguments{ mode, problem_size, batch_count, {alpha, beta}, tensor_A.device_data(), tensor_B.device_data(), tensor_C.device_data(), tensor_D.device_data(), problem_size.n() * problem_size.k(), problem_size.n() * problem_size.k(), problem_size.m() * problem_size.n(), problem_size.m() * problem_size.n(), tensor_A.layout().stride(0), tensor_B.layout().stride(0), tensor_C.layout().stride(0), tensor_D.layout().stride(0) }; Rank2K rank2k_op; size_t workspace_size = Rank2K::get_workspace_size(arguments); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); cutlass::Status status = rank2k_op.initialize(arguments, workspace.get()); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Run the Rank2K // status = rank2k_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Verify // bool passed = this->verify(problem_size, alpha, beta); //if (true) { if (!passed) { std::stringstream fname; fname << "error_Rank2k_device_" << "fill_mode_c_" << (Rank2K::kFillModeC == cutlass::FillMode::kLower ? "lower_" : (Rank2K::kFillModeC == cutlass::FillMode::kUpper ? "upper_" : "invalid_")) << "mnk_" << problem_size.m() << "x" << problem_size.n() << "x" << problem_size.k() << "_" << Rank2K::ThreadblockShape::kM << "x" << Rank2K::ThreadblockShape::kN << "x" << Rank2K::ThreadblockShape::kK << "_" << Rank2K::WarpShape::kM << "x" << Rank2K::WarpShape::kN << "x" << Rank2K::WarpShape::kK << ".txt"; std::cout << fname.str() << std::endl; std::ofstream results(fname.str()); results << problem_size << std::endl; results << "\nA:\n" << tensor_A.host_view() << "\n" << "\nB:\n" << tensor_B.host_view() << "\n" << "\nC:\n" << tensor_C.host_view() << "\n" << "\nD reference:\n" << reference_D.host_view() << "\n" << "\nD computed:\n" << tensor_D.host_view() << "\n"; } return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Rank2K> bool TestRank2kUniversal( cutlass::gemm::GemmCoord const & problem_size, cutlass::gemm::GemmUniversalMode mode, int batch_count, double alpha = 1.0, double beta = 2.0) { bool passed = true; TestbedRank2KUniversal<Rank2K> testbed; using ElementCompute = typename Rank2K::EpilogueOutputOp::ElementCompute; passed = testbed.run( mode, problem_size, batch_count, cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta) ); return passed; } template <typename Rank2K> bool TestAllRank2KUniversal() { bool passed = true; int const kMinimumOperandElementSize = int(cutlass::sizeof_bits<typename Rank2K::ElementA>::value); int const kAlignment = cutlass::platform::is_same< typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize; // int8_t gemm alignment constraints int const kAlignmentM = cutlass::platform::is_same<typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Rank2K::ElementA, int8_t>::value && cutlass::platform::is_same<typename Rank2K::LayoutA, cutlass::layout::ColumnMajor>::value ? 
4 : kAlignment; int const kAlignmentN = kAlignmentM; int const kAlignmentK = cutlass::platform::is_same<typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Rank2K::ElementA, int8_t>::value && cutlass::platform::is_same<typename Rank2K::LayoutA, cutlass::layout::RowMajor>::value ? 4 : kAlignment; cutlass::gemm::GemmUniversalMode modes[] = { cutlass::gemm::GemmUniversalMode::kGemm, }; int problem_size_n[] = { kAlignmentN, 512 - 2*kAlignmentN }; int problem_size_k[] = { kAlignmentK, Rank2K::ThreadblockShape::kK * Rank2K::kStages - kAlignmentK, Rank2K::ThreadblockShape::kK * Rank2K::kStages * 3 - kAlignmentK }; int batch_counts[] = { // may be interpretted as batch count or split-K slices 1 // Just running one batch for now (removing 2, 3, 5, 7) }; double problem_alpha[] = { 1.0, 3.25 }; double problem_beta[] = { 0.0, 2.15 }; using ElementCompute = typename Rank2K::EpilogueOutputOp::ElementCompute; for (cutlass::gemm::GemmUniversalMode mode : modes) { for (int n : problem_size_n) { for (int k : problem_size_k) { for (int batch_count : batch_counts) { for (auto alpha : problem_alpha) { for (auto beta : problem_beta) { if (mode == cutlass::gemm::GemmUniversalMode::kGemm || mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) { // skip very small K problems //if (k / batch_count < 2 * Rank2K::ThreadblockShape::kK) { // continue; //} } cutlass::gemm::GemmCoord problem_size(n, n, k); TestbedRank2KUniversal<Rank2K> testbed; passed = testbed.run( mode, problem_size, batch_count, cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta) ); if (!passed) { return false; } } } } } } } return passed; } template <typename Rank2K> bool TestAllRank2KHermitianUniversal() { bool passed = true; using ElementCompute = typename Rank2K::EpilogueOutputOp::ElementCompute; using ElementAccumulator = typename Rank2K::ElementAccumulator; int const kMinimumOperandElementSize = int(cutlass::sizeof_bits<typename Rank2K::ElementA>::value); int const kAlignment = cutlass::platform::is_same< typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize; // int8_t gemm alignment constraints int const kAlignmentM = cutlass::platform::is_same<typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Rank2K::ElementA, int8_t>::value && cutlass::platform::is_same<typename Rank2K::LayoutA, cutlass::layout::ColumnMajor>::value ? 4 : kAlignment; int const kAlignmentN = kAlignmentM; int const kAlignmentK = cutlass::platform::is_same<typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Rank2K::ElementA, int8_t>::value && cutlass::platform::is_same<typename Rank2K::LayoutA, cutlass::layout::RowMajor>::value ? 
4 : kAlignment; cutlass::gemm::GemmUniversalMode modes[] = { cutlass::gemm::GemmUniversalMode::kGemm, }; int problem_size_n[] = { kAlignmentN, 512 - 2*kAlignmentN }; int problem_size_k[] = { kAlignmentK, Rank2K::ThreadblockShape::kK * Rank2K::kStages - kAlignmentK, Rank2K::ThreadblockShape::kK * Rank2K::kStages * 3 - kAlignmentK }; int batch_counts[] = { // may be interpretted as batch count or split-K slices 1 // Just running one batch for now (removing 2, 3, 5, 7) }; /* Complex alpha for HER2K */ ElementAccumulator problem_alpha[] = { {1.0}, {1.25, 3.25}, {-0.25, -2.25} }; ElementAccumulator problem_beta[] = { 0.0, -2.25 }; for (cutlass::gemm::GemmUniversalMode mode : modes) { for (int n : problem_size_n) { for (int k : problem_size_k) { for (int batch_count : batch_counts) { for (auto alpha : problem_alpha) { for (auto beta : problem_beta) { if (mode == cutlass::gemm::GemmUniversalMode::kGemm || mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) { // skip very small K problems //if (k / batch_count < 2 * Rank2K::ThreadblockShape::kK) { // continue; //} } cutlass::gemm::GemmCoord problem_size(n, n, k); TestbedRank2KUniversal<Rank2K> testbed; passed = testbed.run( mode, problem_size, batch_count, alpha, beta ); if (!passed) { return false; } } } } } } } return passed; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace gemm } // namespace test /////////////////////////////////////////////////////////////////////////////////////////////////
test/unit/gemm/device/testbed_rank2k_universal.h/0
{ "file_path": "test/unit/gemm/device/testbed_rank2k_universal.h", "repo_id": "test", "token_count": 8644 }
50
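/////////////////////////////////////////////////////////////////////////////////////////////////
// Editorial sketch (not part of the testbed above): a host-only, real-valued illustration of
// the operation the Rank2K testbed verifies for the lower-fill case,
//   D = alpha * (A * B^T + B * A^T) + beta * C,
// with A and B of shape (n x k) and C, D symmetric (n x n). The function below is a
// hypothetical helper for illustration only; the testbed itself relies on
// cutlass::reference::host::Rank2KComplex, which additionally handles complex transforms,
// fill modes, and BLAS modes.
#include <vector>

void rank2k_reference_sketch(int n, int k, float alpha, float beta,
                             std::vector<float> const &A,  // n x k, row-major
                             std::vector<float> const &B,  // n x k, row-major
                             std::vector<float> const &C,  // n x n, row-major
                             std::vector<float> &D) {      // n x n, row-major
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j <= i; ++j) {  // lower triangle only (FillMode::kLower)
      float acc = 0.f;
      for (int p = 0; p < k; ++p) {
        acc += A[i * k + p] * B[j * k + p] + B[i * k + p] * A[j * k + p];
      }
      D[i * n + j] = alpha * acc + beta * C[i * n + j];
    }
  }
}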
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "mma_pipelined_testbed.h" ///////////////////////////////////////////////////////////////////////////////////////////////// // sgemm_NT ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_sgemm, sgemm_nt_32x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass, 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_sgemm, sgemm_nt_64x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_sgemm, sgemm_nt_32x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_sgemm, sgemm_nt_64x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 128, 16); float 
alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_sgemm, sgemm_nt_128x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } ///////////////////////////////////////////////////////////////////////////////////////////////// // dgemm_NN ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_dgemm, dgemm_nt_32x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_dgemm, dgemm_nt_64x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_dgemm, dgemm_nt_32x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, 
cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_dgemm, dgemm_nt_64x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 128, 16); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_dgemm, dgemm_nt_128x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } ///////////////////////////////////////////////////////////////////////////////////////////////// // igemm_NN ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_igemm, igemm_nt_32x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_igemm, igemm_nt_64x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, 
cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_igemm, igemm_nt_32x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_igemm, igemm_nt_64x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 128, 16); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_igemm, igemm_nt_128x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } ///////////////////////////////////////////////////////////////////////////////////////////////// // hgemm_NN ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_hgemm, hgemm_nt_32x64x8_32x64x1) { using MmaCore = 
typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_hgemm, hgemm_nt_64x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_hgemm, hgemm_nt_32x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_hgemm, hgemm_nt_64x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 128, 16); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, 
cutlass::Distribution::Uniform); } TEST(SM50_hgemm, hgemm_nt_128x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } ///////////////////////////////////////////////////////////////////////////////////////////////// // igemm_NT DP4A ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM61_igemm, igemm_int8_nt_64x64x16_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_64x64x32_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 32>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 32>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 4096); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_64x64x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 
32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_128x64x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_128x128x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_256x128x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<256, 256, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<128, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(256, 256, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_128x256x64_64x64x16) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 256, 64>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord 
problem_size(128, 256, 64); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_256x128x64_64x64x16) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<256, 128, 64>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(256, 128, 64); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_64x64x16_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_64x64x32_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 32>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 32>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 4096); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_64x64x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; 
cutlass::gemm::GemmCoord problem_size(64, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_128x64x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_128x128x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_256x128x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<256, 256, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<128, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(256, 256, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_128x256x64_64x64x16) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 256, 64>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, 
cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 256, 64); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_256x128x64_64x64x16) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<256, 128, 64>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(256, 128, 64); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nn_64x64x16_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); }
test/unit/gemm/threadblock/mma_pipelined_simt.cu/0
{ "file_path": "test/unit/gemm/threadblock/mma_pipelined_simt.cu", "repo_id": "test", "token_count": 25788 }
51
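/////////////////////////////////////////////////////////////////////////////////////////////////
// Editorial note on the launch configurations used above (an observation about the test
// pattern, not a CUTLASS API): each threadblock carries one warp per WarpShape tile of the
// ThreadblockShape tile, and the tests launch dim3 block(32, warp_count, 1) accordingly.
// The constexpr helper below is hypothetical and only restates that arithmetic.
constexpr int warp_count(int tb_m, int tb_n, int warp_m, int warp_n) {
  return (tb_m / warp_m) * (tb_n / warp_n);
}

static_assert(warp_count(128, 128, 32, 64) == 8, "matches dim3 block(32, 8, 1)");
static_assert(warp_count( 64, 128, 32, 64) == 4, "matches dim3 block(32, 4, 1)");
static_assert(warp_count( 32,  64, 32, 64) == 1, "matches dim3 block(32, 1, 1)");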
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for tensor layout */ #include "../common/cutlass_unit_test.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace layout { void test_NHWC_layout(int n_size, int h_size, int w_size, int c_size) { int ldc = c_size + 1; int ldw = ldc * (w_size + 2); int ldh = ldw * (h_size + 3); cutlass::layout::TensorNHWC::Stride tensor_stride({ ldc, ldw, ldh }); cutlass::layout::TensorNHWC tensor_nhwc(tensor_stride); // test pointer offset for (int n_idx = 0; n_idx < n_size; n_idx++) { for (int h_idx = 0; h_idx < h_size; h_idx++) { for (int w_idx = 0; w_idx < w_size; w_idx++) { for (int c_idx = 0; c_idx < c_size; c_idx++) { cutlass::Tensor4DCoord tensor_coord(n_idx, h_idx, w_idx, c_idx); auto ptr_offset = tensor_nhwc(tensor_coord); decltype(ptr_offset) reference_offset = c_idx + w_idx * ldc + h_idx * ldw + n_idx * ldh; EXPECT_EQ(ptr_offset, reference_offset); } } } } // test stride auto stride = tensor_nhwc.stride(); EXPECT_EQ(stride, tensor_stride); // test capacity auto capacity = tensor_nhwc.capacity(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size)); decltype(capacity) reference_capacity = ldh * n_size; EXPECT_EQ(capacity, reference_capacity); // test packed auto packed_tensor_layout = tensor_nhwc.packed(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size)); auto packed_stride = packed_tensor_layout.stride(); EXPECT_EQ(packed_stride, cutlass::layout::TensorNHWC::Stride({ c_size, w_size * c_size, h_size * w_size * c_size })); } void test_NCHW_layout(int n_size, int c_size, int h_size, int w_size) { int ldw = w_size + 1; int ldh = ldw * (h_size + 2); int ldc = ldh * (c_size + 1); cutlass::layout::TensorNCHW::Stride tensor_stride({ ldw, ldh, ldc }); cutlass::layout::TensorNCHW tensor_nchw(tensor_stride); // test pointer offset for (int n_idx = 0; n_idx < n_size; n_idx++) { for (int c_idx = 0; c_idx < c_size; c_idx++) { for (int h_idx = 0; h_idx < h_size; h_idx++) { for (int w_idx = 0; w_idx < w_size; w_idx++) { // Tensor4DCoord is always created in NHWC order cutlass::Tensor4DCoord tensor_coord(n_idx, h_idx, w_idx, c_idx); auto ptr_offset = tensor_nchw(tensor_coord); decltype(ptr_offset) reference_offset = w_idx + h_idx * ldw + c_idx * ldh + n_idx * ldc; EXPECT_EQ(ptr_offset, reference_offset); } } } } // test stride auto stride = tensor_nchw.stride(); EXPECT_EQ(stride, tensor_stride); // test capacity auto capacity = tensor_nchw.capacity(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size)); decltype(capacity) reference_capacity = ldc * n_size; EXPECT_EQ(capacity, reference_capacity); // test packed auto packed_tensor_layout = tensor_nchw.packed(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size)); auto packed_stride = packed_tensor_layout.stride(); EXPECT_EQ(packed_stride, cutlass::layout::TensorNCHW::Stride({ w_size, w_size * h_size, w_size * h_size * c_size })); } } // namespace layout } // namespace test ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Layout_Tensor, NHWC_32_12_10_14) { int n_size = 32; int h_size = 12; int w_size = 10; int c_size = 14; test::layout::test_NHWC_layout(n_size, h_size, w_size, c_size); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Layout_Tensor, NCHW_32_12_10_14) { int n_size = 32; int c_size = 12; int h_size = 10; int w_size = 14; test::layout::test_NCHW_layout(n_size, c_size, h_size,
w_size); } /////////////////////////////////////////////////////////////////////////////////////////////////
test/unit/layout/tensor.cu/0
{ "file_path": "test/unit/layout/tensor.cu", "repo_id": "test", "token_count": 2305 }
52
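The offset checks in the test above follow directly from the NHWC stride definition: for a packed tensor, coordinate (n, h, w, c) maps to c + w*C + h*W*C + n*H*W*C. The short sketch below is not part of the test file; the small extents, the helper name, and the expected value 119 are illustrative only, assuming the same CUTLASS headers the test includes.

#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"

// Minimal sketch: compute a linear offset through a packed NHWC layout and
// compare it with the closed-form expression used by the unit test above.
inline bool nhwc_packed_offset_example() {
  int const N = 2, H = 3, W = 4, C = 5;

  cutlass::Tensor4DCoord extent(N, H, W, C);
  cutlass::layout::TensorNHWC layout = cutlass::layout::TensorNHWC::packed(extent);

  cutlass::Tensor4DCoord coord(1, 2, 3, 4);   // (n, h, w, c)
  auto offset = layout(coord);

  // c + w*C + h*W*C + n*H*W*C = 4 + 15 + 40 + 60 = 119
  return offset == 4 + 3 * C + 2 * W * C + 1 * H * W * C;
}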
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit test for the PipelineTmaAsync class */ #define KERNEL_DBG_TRACE false #include "../common/cutlass_unit_test.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> #include <cute/arch/cluster_sm90.hpp> #include <cutlass/util/reference/host/gemm.h> #include <cutlass/cluster_launch.hpp> #include "cutlass/core_io.h" #include "cutlass/util/print_error.hpp" #include "cutlass/util/GPU_Clock.hpp" #include "testbed.h" #include "cutlass/pipeline/pipeline.hpp" #include "cutlass/arch/barrier.h" #include "cute/arch/cluster_sm90.hpp" using namespace cute; //////////////////// KERNEL ///////////////////////// template <uint32_t Stages> struct SharedStorage { typename cutlass::PipelineTmaAsync<Stages>::SharedStorage storage; }; // Goal of this kernel is to complete deadlock-free template <class ClusterShape, uint32_t NumStages> __global__ static void pipeline_device(uint32_t const NumIterations) { extern __shared__ char shared_memory[]; using MainloopPipeline = cutlass::PipelineTmaAsync<NumStages>; using PipelineState = cutlass::PipelineState<NumStages>; using SharedStorage = SharedStorage<NumStages>; SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory); [[maybe_unused]] auto cta_layout = Layout<ClusterShape>{}; // (m,n) -> cta_id int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); int warp_group_thread_idx = threadIdx.x % 128; dim3 block_id_in_cluster = cute::block_id_in_cluster(); auto cluster_shape = ClusterShape{}; // #Producers = #RowsInCluster + #ColsInCluster - 1 uint32_t const NumProducers = cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1; uint32_t const TmaTransactionBytes = sizeof(uint32_t) * NumProducers; uint32_t const per_cta_bytes = sizeof(uint32_t); // mbarrier.init typename MainloopPipeline::Params params; params.transaction_bytes = TmaTransactionBytes; params.role = MainloopPipeline::ThreadCategory::ProducerConsumer; params.is_leader = warp_group_thread_idx == 0; params.num_consumers = 128; MainloopPipeline pipeline(shared_storage.storage, params, cluster_shape); __syncthreads(); // Ensure All CTAs in Cluster have completed init before issuing commits cute::cluster_arrive_relaxed(); cute::cluster_wait(); // Total number of gemm_k_iterations auto mma_k_iterations = NumIterations; auto tma_k_iterations = NumIterations; PipelineState smem_pipe_read; // For the DMA (prologue) - we start with an opposite phase - since we skip all waits // i.e., we know that the buffer is indeed empty PipelineState smem_pipe_write = cutlass::make_producer_start_state<MainloopPipeline>(); PipelineState smem_pipe_release; int K_TILE_MMAS = 1; int lane_predicate = cute::elect_one_sync(); int k_pipe_tma_prologue = min(NumStages, tma_k_iterations); // DMA Prologue (Loads) CUTLASS_PRAGMA_UNROLL for(int i = 0; i < k_pipe_tma_prologue; ++i) { pipeline.producer_acquire(smem_pipe_write); // cp.async.bulk.tensor would typically happen here pipeline.producer_commit(smem_pipe_write, per_cta_bytes); ++smem_pipe_write; } tma_k_iterations -= k_pipe_tma_prologue; // MMA Prologue (Compute) - modeling inflight MMAs for (int iter = 0; iter < K_TILE_MMAS; ++iter) { pipeline.consumer_wait(smem_pipe_read); warpgroup_arrive(); // GMMA would typically happen here ++smem_pipe_read; } mma_k_iterations -= K_TILE_MMAS; CUTLASS_PRAGMA_NO_UNROLL for (int iter = 0; iter < mma_k_iterations; ++iter) { pipeline.consumer_wait(smem_pipe_read); warpgroup_arrive(); // GMMA would typically happen here 
    pipeline.consumer_release(smem_pipe_release);

    if (lane_predicate && (warp_idx == 0) && (tma_k_iterations > 0)) {
      pipeline.producer_acquire(smem_pipe_write);
      // cp.async.bulk.tensor would typically happen here
      pipeline.producer_commit(smem_pipe_write, per_cta_bytes);
      ++smem_pipe_write;
      --tma_k_iterations;
    }

    // next read stage
    ++smem_pipe_read;
    ++smem_pipe_release;
  }

  // To make sure remote SMEM doesn't get destroyed
  cute::cluster_arrive();
  cute::cluster_wait();
}
/////////////////////////////////////////////////////

/// Device NT GMMA + TMA specialized
template<uint32_t Stages_, typename ClusterShape_>
struct PipelineTest {

  //
  // Data members
  //
  static constexpr uint32_t Stages = Stages_;
  static constexpr uint32_t kBlockSize = 128;
  using ClusterShape = ClusterShape_;

  //
  // Methods
  //

  // Ctor
  PipelineTest(){};

  // Run CuTe GEMM kernel
  cudaError_t run(uint32_t const kNumIters, cudaStream_t stream = 0) {

    float elapsed_ms = 0.0f;
    // Pipeline (multistage pipeline)
    [[maybe_unused]] auto num_stages = Int<Stages>{};
    auto cluster_shape = Shape<Int<ClusterShape::kM>, Int<ClusterShape::kN>, _1>{};

    //
    // Configure and launch
    //
    int iterations = 1;
    cudaEvent_t events[2];
    cudaError_t result;

    for (cudaEvent_t & event : events) {
      result = cudaEventCreate(&event);
      if (result != cudaSuccess) {
        std::cerr << "Error: Failed to create event.";
        return result;
      }
    }

    result = cudaEventRecord(events[0]);
    if (result != cudaSuccess) {
      std::cerr << "Error: Failed to record start event.";
      return result;
    }

    for (int iter = 0; iter < iterations; ++iter) {

      int smem_size = int(sizeof(SharedStorage<Stages>));

      result = cudaFuncSetAttribute(
        pipeline_device<decltype(cluster_shape), Stages>,
        cudaFuncAttributeMaxDynamicSharedMemorySize,
        smem_size);

      // Launch a single Cluster, with 128 threads per CTA
      dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1);
      dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1);
      dim3 dimBlock(kBlockSize,1,1);

      const void* kernel = (const void*)pipeline_device<decltype(cluster_shape), Stages>;
      int iters = kNumIters;
      void* kernel_params[] = {reinterpret_cast<void*>(&iters)};
      cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params);

    } // profiling loop ends

    result = cudaEventRecord(events[1]);
    if (result != cudaSuccess) {
      std::cerr << "Error: Failed to record stop event.";
      return result;
    }

    result = cudaDeviceSynchronize();
    if (result != cudaSuccess) {
      std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl;
      return result;
    }

    result = cudaEventElapsedTime(&elapsed_ms, events[0], events[1]);
    if (result != cudaSuccess) {
      std::cerr << "Error: Failed to compute elapsed time.";
      return result;
    }

    for (cudaEvent_t & event : events) {
      (void)cudaEventDestroy(event);
    }

    return cudaSuccess;
  }
};

#if CUDA_12_0_SM90_FEATURES_SUPPORTED
TEST(SM90_Verify_PipelineTmaAsync, Cluster1x1_Stage2) {
  Options options;
  using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
  static constexpr uint32_t Stages = 2;
  using Test = PipelineTest<Stages, ClusterShape>;
  Testbed<Test> testbed(options);
  EXPECT_TRUE(testbed.verification());
}

TEST(SM90_Verify_PipelineTmaAsync, Cluster1x1_Stage5) {
  Options options;
  using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
  static constexpr uint32_t Stages = 5;
  using Test = PipelineTest<Stages, ClusterShape>;
  Testbed<Test> testbed(options);
  EXPECT_TRUE(testbed.verification());
}

TEST(SM90_Verify_PipelineTmaAsync, Cluster1x1_Stage10) {
  Options options;
  using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
  static constexpr uint32_t Stages =
10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x2_Stage5) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 5; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x2_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x4_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; static constexpr uint32_t Stages = 10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x2_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x1_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x1_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); 
EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } #endif
test/unit/pipeline/pipeline_tma_async.cu/0
{ "file_path": "test/unit/pipeline/pipeline_tma_async.cu", "repo_id": "test", "token_count": 5494 }
53
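For reference, the producer/consumer protocol that pipeline_device exercises reduces to the acquire/commit/wait/release sequence below. This is a hedged sketch rather than part of the test: the helper name and single-stage structure are hypothetical, and it assumes a PipelineTmaAsync-style pipeline object and PipelineState counters set up exactly as in the kernel above.

// Illustrative device-side helper (hypothetical): one full stage handoff.
template <class Pipeline, class PipeState>
CUTLASS_DEVICE void pipeline_stage_roundtrip(
    Pipeline &pipeline,
    PipeState &smem_pipe_write,   // producer-side state (starts in the "opposite" phase)
    PipeState &smem_pipe_read,    // consumer-side state
    uint32_t per_cta_bytes) {

  // Producer: reserve an empty stage, issue the (elided) TMA copy, then
  // publish the expected transaction bytes so consumers can wait on arrival.
  pipeline.producer_acquire(smem_pipe_write);
  pipeline.producer_commit(smem_pipe_write, per_cta_bytes);
  ++smem_pipe_write;

  // Consumer: block until the stage is full, consume it (elided), then
  // release it so the producer may reuse the buffer.
  pipeline.consumer_wait(smem_pipe_read);
  pipeline.consumer_release(smem_pipe_read);
  ++smem_pipe_read;
}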
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief BLAS-like handle used to launch operations on the CUDA device. 
*/ #pragma once #include <memory> #include "cutlass/library/library.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Handle object class Handle { private: /// Host workspace static int const kHostWorkspaceSize = (4 << 10); /// Provider of operations Provider provider_; /// CUDA device properties cudaDeviceProp device_; /// CUDA stream cudaStream_t stream_; /// Device workspace void *workspace_; /// Size of device workspace in bytes size_t workspace_size_; /// Indicates whether scalars are host or device pointers ScalarPointerMode scalar_pointer_mode_; /// Pointer to the most recently executed operation Operation const *last_operation_; public: /// Constructor Handle(cudaStream_t stream = nullptr, size_t workspace_size = (4<<20)); /// Destructor ~Handle(); /// Move constructor Handle(Handle && handle); /// Move assignment operator Handle &operator=(Handle && handle); // // Persistent state accessors // /// Returns compute capability of the selected device int compute_capability() const; /// Sets the current CUDA stream void set_stream(cudaStream_t stream); /// Gets the current CUDA stream cudaStream_t get_stream() const; /// Gets the current provider Provider get_provider() const; /// Sets the provider of operations void set_provider(Provider provider); /// Gets the device workspace size size_t get_workspace_size() const; /// Gets a pointer to the device workspace allocation in Global Memory void *get_workspace() const; /// Sets the size of device workspace, invalidating calls to get_device_workspace() void set_workspace_size(size_t bytes); /// Gets the scalar pointer mode ScalarPointerMode get_scalar_pointer_mode() const; /// Sets the scalar pointer mode void set_scalar_pointer_mode(ScalarPointerMode mode); /// Gets the most recently executed operation Operation const *get_last_operation() const; // // Computations // /// Executes a GEMM computation: D <= alpha * A*B + beta * C Status gemm( int M, /// GEMM M dimension int N, /// GEMM N dimension int K, /// GEMM K dimension NumericTypeID element_compute, /// Data type of internal accumulation NumericTypeID element_scalar, /// Data type of alpha/beta scalars void const *alpha, /// Pointer to alpha scalar NumericTypeID element_A, /// Data type of A matrix elements LayoutTypeID layout_A, /// Layout of A matrix ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices void const * ptr_A, /// Pointer to A matrix in Global Memory int64_t lda, /// Leading dimension of A matrix NumericTypeID element_B, /// Data type of B matrix elements LayoutTypeID layout_B, /// Layout of B matrix ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices void const * ptr_B, /// Pointer to B matrix in Global Memory int64_t ldb, /// Leading dimension of B matrix void const * beta, /// Pointer to beta scalar NumericTypeID element_C, /// Data type of C and D matrices void const * ptr_C, /// Pointer to C matrix int64_t ldc, /// Leading dimension of C matrix void * ptr_D, /// Pointer to D matrix int64_t ldd /// Leading dimension of D matrix ); /// Executes a GEMM computation: D <= alpha * A*B + beta * C. // // Supports batched-strided, batched array or split-K serial or split-K parallel. 
  //
  Status gemm_universal(
    GemmUniversalMode mode,              /// indicates the mode in which the kUniversal GEMM is launched

    int M,                               /// GEMM M dimension
    int N,                               /// GEMM N dimension
    int K,                               /// GEMM K dimension

    NumericTypeID element_compute,       /// Data type of internal accumulation

    NumericTypeID element_scalar,        /// Data type of alpha/beta scalars

    void const *alpha,                   /// Pointer to alpha scalar

    NumericTypeID element_A,             /// Data type of A matrix elements
    LayoutTypeID layout_A,               /// Layout of A matrix
    ComplexTransform transform_A,        /// Complex transformation applied to A matrix - ignored for real-valued matrices

    void const * ptr_A,                  /// Pointer to A matrix in Global Memory
    int64_t lda,                         /// Leading dimension of A matrix

    NumericTypeID element_B,             /// Data type of B matrix elements
    LayoutTypeID layout_B,               /// Layout of B matrix
    ComplexTransform transform_B,        /// Complex transformation applied to B matrix - ignored for real-valued matrices

    void const * ptr_B,                  /// Pointer to B matrix in Global Memory
    int64_t ldb,                         /// Leading dimension of B matrix

    void const * beta,                   /// Pointer to beta scalar

    NumericTypeID element_C,             /// Data type of C matrix
    LayoutTypeID layout_C,               /// Layout of C matrix
    void const * ptr_C,                  /// Pointer to C matrix
    int64_t ldc,                         /// Leading dimension of C matrix

    NumericTypeID element_D,             /// Data type of D matrix
    LayoutTypeID layout_D,               /// Layout of D matrix
    void * ptr_D,                        /// Pointer to D matrix
    int64_t ldd,                         /// Leading dimension of D matrix

    int batch_count = 1,                 /// Batch count or number of split-K slices

    int64_t batch_stride_A = 0,          /// Batch stride of A operand
    int64_t batch_stride_B = 0,          /// Batch stride of B operand
    int64_t batch_stride_C = 0,          /// Batch stride of C operand
    int64_t batch_stride_D = 0           /// Batch stride of D operand
  );

  /// Planar complex GEMM
  ///
  /// Note, all data types are the real-valued base types used by the planar-complex GEMM kernel.
/// Status gemm_planar_complex( int M, /// GEMM M dimension int N, /// GEMM N dimension int K, /// GEMM K dimension NumericTypeID element_compute, /// Data type of internal accumulation NumericTypeID element_scalar, /// Data type of alpha/beta scalars void const *alpha, /// Pointer to alpha scalar NumericTypeID element_A, /// Data type of A matrix elements LayoutTypeID layout_A, /// Layout of A matrix ComplexTransform transform_A, /// Complex transformation applied to A matrix void const * ptr_A_real, /// Pointer to real part of A matrix void const * ptr_A_imag, /// Pointer to imaginary part of A matrix int64_t lda_real, /// Leading dimension of real part of A matrix int64_t lda_imag, /// Leading dimension of imaginary part of A matrix NumericTypeID element_B, /// Data type of B matrix elements LayoutTypeID layout_B, /// Layout of B matrix ComplexTransform transform_B, /// Complex transformation applied to B matrix void const * ptr_B_real, /// Pointer to real part of B matrix void const * ptr_B_imag, /// Pointer to imaginary part of B matrix int64_t ldb_real, /// Leading dimension of real part of B matrix int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix void const * beta, /// Pointer to beta scalar NumericTypeID element_C, /// Data type of C and D matrix void const * ptr_C_real, /// Pointer to real part of C matrix void const * ptr_C_imag, /// Pointer to imaginary part of C matrix int64_t ldc_real, /// Leading dimension of real part of C matrix int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix void * ptr_D_real, /// Pointer to real part of D matrix void * ptr_D_imag, /// Pointer to imaginary part of D matrix int64_t ldd_real, /// Leading dimension of real part of D matrix int64_t ldd_imag, /// Leading dimension of imaginary part of D matrix int batch_count = 1, /// Number of batched GEMMs to execute int64_t batch_stride_A_real = 0, int64_t batch_stride_A_imag = 0, int64_t batch_stride_B_real = 0, int64_t batch_stride_B_imag = 0, int64_t batch_stride_C_real = 0, int64_t batch_stride_C_imag = 0, int64_t batch_stride_D_real = 0, int64_t batch_stride_D_imag = 0 ); /// Planar complex GEMM loading pointers from arrays in global memory Status gemm_planar_complex_array( int expected_M, /// Expected GEMM M dimension (used for sizing CUDA grid) int expected_N, /// Expected GEMM N dimension (used for sizing CUDA grid) int expected_K, /// Expected GEMM K dimension int batch_count, /// Number of independent GEMM computations to execute int const *M, /// Array containing the GEMM M dimension for each batch index int const *N, /// Array containing the GEMM N dimension for each batch index int const *K, /// Array containing the GEMM K dimension for each batch index NumericTypeID element_compute, /// Data type of internal accumulation NumericTypeID element_scalar, /// Data type of alpha/beta scalars void const *alpha, /// Pointer to alpha scalar NumericTypeID element_A, /// Data type of A matrix elements LayoutTypeID layout_A, /// Layout of A matrix ComplexTransform transform_A, /// Complex transformation applied to A matrix void const * const * ptr_A_real, /// Pointer to array containing pointers to real part of A matrices void const * const * ptr_A_imag, /// Pointer to array containing pointers to imaginary part of A matrices int64_t lda_real, /// Leading dimension of real part of A matrix int64_t lda_imag, /// Leading dimension of imaginary part of A matrix NumericTypeID element_B, /// Data type of B matrix elements LayoutTypeID layout_B, /// Layout of B matrix 
ComplexTransform transform_B, /// Complex transformation applied to B matrix void const * const * ptr_B_real, /// Pointer to array containing pointers to real part of B matrices void const * const * ptr_B_imag, /// Pointer to array containing pointers to imaginary part of B matrices int64_t ldb_real, /// Leading dimension of real part of B matrix int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix void const * beta, /// Pointer to beta scalar NumericTypeID element_C, /// Data type of C and D matrix void const * const * ptr_C_real, /// Pointer to array containing pointers to real part of C matrices void const * const * ptr_C_imag, /// Pointer to array containing pointers to imaginary part of C matrices int64_t ldc_real, /// Leading dimension of real part of C matrix int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix void * const * ptr_D_real, /// Pointer to array containing pointers to real part of D matrices void * const * ptr_D_imag, /// Pointer to array containing pointers to imaginary part of D matrices int64_t ldd_real, /// Leading dimension of real part of D matrix int64_t ldd_imag /// Leading dimension of imaginary part of D matrix ); }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Unique pointer storing the handle using HandlePtr = std::unique_ptr<Handle>; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Finds conv2d operation instances with Conv2d::ElementC = Reduction::ElementWorkspace Operation const* find_conv_operation_for_parallel_reduction(Operation const *operation); ///////////////////////////////////////////////////////////////////////////////////////////////// /// Finds gemm operation instances with ElementC = Reduction::ElementWorkspace Operation const* find_gemm_operation_for_parallel_reduction(Operation const *operation); ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
tools/library/include/cutlass/library/handle.h/0
{ "file_path": "tools/library/include/cutlass/library/handle.h", "repo_id": "tools", "token_count": 6327 }
54
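As a usage illustration of the interface declared above, the sketch below drives a single-precision column-major GEMM through Handle::gemm. It is a hedged example, not part of the library: the helper name is hypothetical, the device pointers are assumed to be valid allocations, and it assumes the CUTLASS library was compiled with an SGEMM kernel the handle can dispatch to.

#include "cutlass/library/handle.h"

// Hypothetical helper: C and D are MxN, A is MxK, B is KxN, all column-major.
inline cutlass::Status run_sgemm_example(
    cutlass::library::Handle &handle,
    int M, int N, int K,
    float alpha, float const *d_A, float const *d_B,
    float beta,  float const *d_C, float *d_D) {

  using cutlass::library::NumericTypeID;
  using cutlass::library::LayoutTypeID;
  using cutlass::library::ComplexTransform;

  return handle.gemm(
    M, N, K,
    NumericTypeID::kF32,                  // element_compute: internal accumulation
    NumericTypeID::kF32,                  // element_scalar: type of alpha/beta
    &alpha,
    NumericTypeID::kF32,                  // A operand
    LayoutTypeID::kColumnMajor,
    ComplexTransform::kNone,
    d_A, M,                               // lda = M for column-major A
    NumericTypeID::kF32,                  // B operand
    LayoutTypeID::kColumnMajor,
    ComplexTransform::kNone,
    d_B, K,                               // ldb = K for column-major B
    &beta,
    NumericTypeID::kF32,                  // C and D element type
    d_C, M,                               // ldc = M
    d_D, M);                              // ldd = M
}

The alpha/beta pointers are interpreted according to the handle's scalar pointer mode (host by default), and the returned cutlass::Status indicates whether a matching kernel was found and launched.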