/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used by TensorRef and derived classes for common 4-D and 5-D
tensor formats.
Layout functions map logical coordinates to linear memory. They often require additional
data to describe strides between elements.
Layout functions must implement all members in the public interface of IdentityTensorLayout<>
defined in cutlass/tensor_ref.h.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include "assert.h"
#endif
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/coord.h"
#include "cutlass/tensor_coord.h"
namespace cutlass {
namespace layout {
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Defines data layouts of various tensor formats usable by TensorRef and other classes.
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag used for 3-D NWC tensors for 1-D convolutions; only used in 3.x API
class TensorNWC {};
/// Tag used for n-D KCSRT tensors for n-D convolutions; only used in 3.x API for wgrad output layouts
class TensorKCS {};
class TensorKCSR {};
class TensorKCSRT {};
/// Tag used for n-D CSRTK tensors for n-D convolutions; only used in 3.x API for wgrad output layouts
class TensorCSK {};
class TensorCSRK {};
class TensorCSRTK {};
/// Mapping function for 4-D NHWC tensors.
class TensorNHWC {
public:
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate (n, h, w, c)
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [stride_w, stride_h, stride_n]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNHWC(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorNHWC(
typename Stride::Index stride_w, ///< number of elements between adjacent W coordinates
typename Stride::Index stride_h, ///< number of elements between adjacent H coordinates
typename Stride::Index stride_n ///< number of elements between adjacent N coordinates
):
stride_(make_Coord(stride_w, stride_h, stride_n)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorNHWC(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]))
) { }
/// Helper returns a layout to a tightly packed NHWC tensor.
CUTLASS_HOST_DEVICE
static TensorNHWC packed(TensorCoord const &extent) {
return TensorNHWC(
make_Coord(
extent.c(),
extent.w() * extent.c(),
extent.h() * extent.w() * extent.c()
)
);
}
/// Returns the offset of a coordinate (n, h, w, c) in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord.c() +
LongIndex(stride_[0] * coord.w()) +
LongIndex(stride_[1] * coord.h()) +
LongIndex(stride_[2] * coord.n());
}
/// Returns the offset of a pitchlinear coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return coord.contiguous() + LongIndex(coord.strided() * stride_[2]);
}
/// Returns the logical coordinate (n, h, w, c) from a given offset in linear memory.
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex index) const {
int n = 0, h = 0, w = 0, c = 0;
#if defined(__CUDA_ARCH__)
int tmp = 0;
c = int(index % static_cast<int>(stride_[0]));
unsigned int hw_mul, hw_shr, w_mul, w_shr, c_mul, c_shr;
find_divisor(hw_mul, hw_shr, stride_[2]);
find_divisor(w_mul, w_shr, stride_[1]);
find_divisor(c_mul, c_shr, stride_[0]);
fast_divmod(n, tmp, index, int(stride_[2]), hw_mul, hw_shr);
fast_divmod(h, w, tmp, int(stride_[1]), w_mul, w_shr);
fast_divmod(w, tmp, w, int(stride_[0]), c_mul, c_shr);
#else
n = int(index / stride_[2]);
LongIndex residual = index % stride_[2];
h = int(residual / stride_[1]);
residual = (residual % stride_[1]);
w = int(residual / stride_[0]);
c = int(residual % stride_[0]);
#endif
return TensorCoord(n, h, w, c);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
// It does not make sense if the extent is larger than the stride,
// and we cannot rely on the capacity calculation in such cases.
// These checks could be moved to debug-only code.
if ((extent.c() > stride_[0])
|| (extent.w() * stride_[0] > stride_[1])
|| (extent.h() * stride_[1] > stride_[2])) {
assert(0);
}
return extent.n() * stride_[2];
}
};
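// Example (illustrative sketch, not part of the library): computing offsets with a packed
// NHWC layout. For extent (N, H, W, C) = (2, 4, 8, 16), packed() yields strides
// [C, W*C, H*W*C] = [16, 128, 512], so coordinate (n, h, w, c) maps to c + 16*w + 128*h + 512*n.
//
//   cutlass::layout::TensorNHWC layout = cutlass::layout::TensorNHWC::packed({2, 4, 8, 16});
//
//   int64_t offset = layout({1, 2, 3, 4});                  // 4 + 3*16 + 2*128 + 1*512 = 820
//   cutlass::Tensor4DCoord coord = layout.inverse(offset);  // recovers (1, 2, 3, 4)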
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 4-D NCHW tensors.
class TensorNCHW {
public:
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [w, hw, chw]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNCHW(Stride const &stride = Stride(0)): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorNCHW packed(TensorCoord const &extent) {
return TensorNCHW(
make_Coord(
extent.w(),
extent.w() * extent.h(),
extent.h() * extent.w() * extent.c()
)
);
}
/// Returns the offset of a coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord.w() +
LongIndex(stride_[0] * coord.h()) +
LongIndex(stride_[1] * coord.c()) +
LongIndex(stride_[2] * coord.n());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent.n() * stride_[2];
}
};
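// Example (illustrative sketch, not part of the library): for a packed NCHW tensor of extent
// (N, H, W, C) = (2, 4, 8, 3), packed() yields strides [W, H*W, C*H*W] = [8, 32, 96], so
// coordinate (n, h, w, c) maps to w + 8*h + 32*c + 96*n.
//
//   cutlass::layout::TensorNCHW layout = cutlass::layout::TensorNCHW::packed({2, 4, 8, 3});
//
//   int64_t offset = layout({1, 2, 3, 1});   // 3 + 2*8 + 1*32 + 1*96 = 147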
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 4-D NC/xHWx tensors.
template <int Interleave>
class TensorNCxHWx {
public:
/// Interleaving quantity
static int const kInterleave = Interleave;
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [Interleave x w, Interleave x wh, hwc]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNCxHWx(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorNCxHWx(
typename Stride::Index stride_w, ///< number of elements between adjacent W coordinates
typename Stride::Index stride_h, ///< number of elements between adjacent H coordinates
typename Stride::Index stride_n ///< number of elements between adjacent N coordinates
):
stride_(make_Coord(stride_w, stride_h, stride_n)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorNCxHWx(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]))
) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorNCxHWx packed(TensorCoord const &extent) {
return TensorNCxHWx(
make_Coord(
kInterleave * extent.w(),
kInterleave * extent.w() * extent.h(),
extent.h() * extent.w() * extent.c()
)
);
}
/// Returns the offset of a coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
Index c_minor = (coord.c() % kInterleave);
Index c_major = (coord.c() / kInterleave);
return c_minor +
LongIndex(kInterleave * coord.w()) +
LongIndex(stride_[0] * coord.h()) +
LongIndex(stride_[1] * c_major) +
LongIndex(stride_[2] * coord.n());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent.n() * stride_[2];
}
};
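// Example (illustrative sketch, not part of the library): TensorNCxHWx<32> models the
// interleaved NC/32HW32 format. For extent (N, H, W, C) = (1, 4, 8, 64), packed() yields
// strides [32*W, 32*W*H, H*W*C] = [256, 1024, 2048]. The channel index splits into
// c_major = c / 32 and c_minor = c % 32, and (n, h, w, c) maps to
// c_minor + 32*w + 256*h + 1024*c_major + 2048*n.
//
//   cutlass::layout::TensorNCxHWx<32> layout =
//       cutlass::layout::TensorNCxHWx<32>::packed({1, 4, 8, 64});
//
//   int64_t offset = layout({0, 1, 2, 35});  // 3 + 32*2 + 256*1 + 1024*1 = 1347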
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 4-D CxRSKx tensors.
template <int Interleave>
class TensorCxRSKx {
public:
/// Interleaving quantity
static int const kInterleave = Interleave;
/// Logical rank of tensor
static int const kRank = 4;
/// Rank of stride vector
static int const kStrideRank = 3;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Tensor4DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [Interleave x n, Interleave x nw, Interleave x nwh]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorCxRSKx(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorCxRSKx(
typename Stride::Index stride_w, ///< number of elements between adjacent W coordinates
typename Stride::Index stride_h, ///< number of elements between adjacent H coordinates
typename Stride::Index stride_n ///< number of elements between adjacent N coordinates
):
stride_(make_Coord(stride_w, stride_h, stride_n)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorCxRSKx(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]))
) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static TensorCxRSKx packed(TensorCoord const &extent) {
return TensorCxRSKx(
make_Coord(
kInterleave * extent.n(),
kInterleave * extent.n() * extent.w(),
kInterleave * extent.n() * extent.w() * extent.h()
)
);
}
/// Returns the offset of a coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
Index c_minor = (coord.c() % kInterleave);
Index c_major = (coord.c() / kInterleave);
return c_minor +
LongIndex(kInterleave * coord.n()) +
LongIndex(stride_[0] * coord.w()) +
LongIndex(stride_[1] * coord.h()) +
LongIndex(stride_[2] * c_major);
}
/// Returns the offset of a pitchlinear coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord const &coord) const {
return (coord.contiguous() % kInterleave) +
LongIndex((coord.contiguous() / kInterleave) * stride_[2]) +
LongIndex(coord.strided() * kInterleave);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return (extent.c() / kInterleave * stride_[2]);
}
};
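// Example (illustrative sketch, not part of the library): TensorCxRSKx<4> interleaves the
// outermost C dimension by 4. For extent (N, H, W, C) = (8, 3, 3, 16), packed() yields strides
// [4*N, 4*N*W, 4*N*W*H] = [32, 96, 288], and (n, h, w, c) maps to
// (c % 4) + 4*n + 32*w + 96*h + 288*(c / 4).
//
//   cutlass::layout::TensorCxRSKx<4> layout =
//       cutlass::layout::TensorCxRSKx<4>::packed({8, 3, 3, 16});
//
//   int64_t offset = layout({2, 1, 1, 5});   // 1 + 4*2 + 32*1 + 96*1 + 288*1 = 425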
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Mapping function for 5-D NDHWC tensors.
class TensorNDHWC {
public:
/// Logical rank of tensor
static int const kRank = 5;
/// Rank of stride vector
static int const kStrideRank = 4;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate (n, d, h, w, c)
using TensorCoord = Tensor5DCoord;
/// Stride vector
using Stride = Coord<kStrideRank>;
private:
//
// Data members
//
/// Stride data member - [c, wc, hwc, dhwc]
Stride stride_;
public:
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
TensorNDHWC(Stride const &stride = Stride(0)): stride_(stride) { }
/// Constructor
CUTLASS_HOST_DEVICE
TensorNDHWC(
typename Stride::Index c,
typename Stride::Index wc,
typename Stride::Index hwc,
typename Stride::Index dhwc):
stride_(make_Coord(c, wc, hwc, dhwc)) { }
/// Constructor
// Once convolutions implement 64b stride this ctor can be deleted
CUTLASS_HOST_DEVICE
TensorNDHWC(Coord<kStrideRank, LongIndex> const &stride):
stride_(make_Coord(
static_cast<typename Stride::Index>(stride[0]),
static_cast<typename Stride::Index>(stride[1]),
static_cast<typename Stride::Index>(stride[2]),
static_cast<typename Stride::Index>(stride[3]))
) { }
/// Helper returns a layout to a tightly packed NDHWC tensor.
CUTLASS_HOST_DEVICE
static TensorNDHWC packed(TensorCoord const &extent) {
return TensorNDHWC(
make_Coord(
extent.c(),
extent.w() * extent.c(),
extent.h() * extent.w() * extent.c(),
extent.d() * extent.h() * extent.w() * extent.c()
)
);
}
/// Returns the offset of a coordinate (n, d, h, w, c) in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return coord.c() +
LongIndex(stride_[0] * coord.w()) +
LongIndex(stride_[1] * coord.h()) +
LongIndex(stride_[2] * coord.d()) +
LongIndex(stride_[3] * coord.n());
}
/// Returns the offset of a pitchlinear coordinate in linear memory.
CUTLASS_HOST_DEVICE
LongIndex operator()(PitchLinearCoord coord) const {
return coord.contiguous() + LongIndex(coord.strided() * stride_[3]);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
// It does not make sense if the extent is larger than the stride,
// and we cannot rely on the capacity calculation in such cases.
// These checks could be moved to debug-only code.
if ((extent.c() > stride_[0])
|| (extent.w() * stride_[0] > stride_[1])
|| (extent.h() * stride_[1] > stride_[2])
|| (extent.d() * stride_[2] > stride_[3])) {
assert(0);
}
return extent.n() * stride_[3];
}
};
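// Example (illustrative sketch, not part of the library): for a packed NDHWC tensor of extent
// (N, D, H, W, C) = (1, 2, 4, 8, 16), packed() yields strides [C, W*C, H*W*C, D*H*W*C] =
// [16, 128, 512, 1024], so (n, d, h, w, c) maps to c + 16*w + 128*h + 512*d + 1024*n.
//
//   cutlass::layout::TensorNDHWC layout = cutlass::layout::TensorNDHWC::packed({1, 2, 4, 8, 16});
//
//   int64_t offset = layout({0, 1, 2, 3, 4});  // 4 + 3*16 + 2*128 + 1*512 = 820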
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace layout
} // namespace cutlass
// [end of include/cutlass/layout/tensor.h]
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a densely packed quaternion object intended for storing data in registers and
executing quaternion operations within a CUDA or host thread.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/array.h"
#include "cutlass/real.h"
#include "cutlass/coord.h"
#include "cutlass/matrix.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/vector.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Quaternion: xi + yj + zk + w
template <
typename Element_ = float ///< element type
>
class Quaternion : public Array<Element_, 4> {
public:
/// Logical rank of tensor index space
static int const kRank = 1;
/// Number of elements
static int const kExtent = 4;
/// Base class is a four-element array
using Base = Array<Element_, kExtent>;
/// Element type
using Element = typename Base::Element;
/// Reference type to an element
using Reference = typename Base::reference;
/// Index type
using Index = int;
/// Quaternion storage - imaginary part
static int const kX = 0;
/// Quaternion storage - imaginary part
static int const kY = 1;
/// Quaternion storage - imaginary part
static int const kZ = 2;
/// Quaternion storage - real part
static int const kW = 3;
public:
//
// Methods
//
/// Constructs a quaternion q = 0
CUTLASS_HOST_DEVICE
Quaternion() {
Base::at(kX) = Element();
Base::at(kY) = Element();
Base::at(kZ) = Element();
Base::at(kW) = Element();
}
/// Constructs a quaternion q = w + 0*i + 0*j + 0*k
CUTLASS_HOST_DEVICE
Quaternion(
Element w_
) {
Base::at(kX) = Element();
Base::at(kY) = Element();
Base::at(kZ) = Element();
Base::at(kW) = w_;
}
/// Constructs a quaternion q = w + x*i + y*j + z*k
CUTLASS_HOST_DEVICE
Quaternion(
Element x_,
Element y_,
Element z_,
Element w_
) {
Base::at(kX) = x_;
Base::at(kY) = y_;
Base::at(kZ) = z_;
Base::at(kW) = w_;
}
/// Constructs a quaternion from a vector representing the imaginary part and a real number
CUTLASS_HOST_DEVICE
Quaternion(
Matrix3x1<Element> const &imag_,
Element w_ = Element()
) {
Base::at(kX) = imag_[0];
Base::at(kY) = imag_[1];
Base::at(kZ) = imag_[2];
Base::at(kW) = w_;
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference at(Index idx) const {
return Base::at(idx);
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference at(Index idx) {
return Base::at(idx);
}
/// Accesses the x element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Element x() const {
return Base::at(kX);
}
/// Accesses the x element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Reference x() {
return Base::at(kX);
}
/// Accesses the y element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Element y() const {
return Base::at(kY);
}
/// Accesses the y element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Reference y() {
return Base::at(kY);
}
/// Accesses the z element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Element z() const {
return Base::at(kZ);
}
/// Accesses the z element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Reference z() {
return Base::at(kZ);
}
/// Accesses the real part of the quaternion
CUTLASS_HOST_DEVICE
Element w() const {
return Base::at(kW);
}
/// Accesses the real part of the quaternion
CUTLASS_HOST_DEVICE
Reference w() {
return Base::at(kW);
}
/// Returns the pure imaginary part of the quaternion as a 3-vector
CUTLASS_HOST_DEVICE
Matrix3x1<Element> pure() const {
return Matrix3x1<Element>(x(), y(), z());
}
/// Returns a quaternion representation of a spatial rotation given a unit-length axis and
/// a rotation in radians.
CUTLASS_HOST_DEVICE
static Quaternion<Element> rotation(
Matrix3x1<Element> const &axis_unit, ///< axis of rotation (assumed to be unit length)
Element theta) { ///< angular rotation in radians
Element s = fast_sin(theta / Element(2));
return Quaternion(
s * axis_unit[0],
s * axis_unit[1],
s * axis_unit[2],
fast_cos(theta / Element(2))
);
}
/// Returns a quaternion representation of a spatial rotation represented as a
/// unit-length rotation axis (r_x, r_y, r_z) and an angular rotation in radians
CUTLASS_HOST_DEVICE
static Quaternion<Element> rotation(
Element r_x,
Element r_y,
Element r_z,
Element theta) { ///< angular rotation in radians
return rotation({r_x, r_y, r_z}, theta);
}
/// Geometric rotation of a 3-element vector
CUTLASS_HOST_DEVICE
Matrix3x1<Element> rotate(Matrix3x1<Element> const &rhs) const {
return (*this * Quaternion<Element>(rhs, 0) * reciprocal(*this)).pure();
}
/// Inverse rotation operation
CUTLASS_HOST_DEVICE
Matrix3x1<Element> rotate_inv(Matrix3x1<Element> const &rhs) const {
return (reciprocal(*this) * Quaternion<Element>(rhs, 0) * *this).pure();
}
/// Rotates a 3-vector assuming this is a unit quaternion (a spinor)
CUTLASS_HOST_DEVICE
Matrix3x1<Element> spinor(Matrix3x1<Element> const &rhs) const {
return (*this * Quaternion<Element>(rhs, 0) * conj(*this)).pure();
}
/// Inverse rotation of 3-vector assuming this is a unit quaternion (a spinor)
CUTLASS_HOST_DEVICE
Matrix3x1<Element> spinor_inv(Matrix3x1<Element> const &rhs) const {
return (conj(*this) * Quaternion<Element>(rhs, 0) * *this).pure();
}
/// In-place addition
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator+=(Quaternion<Element> const &rhs) {
*this = (*this + rhs);
return *this;
}
/// In-place subtraction
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator-=(Quaternion<Element> const &rhs) {
*this = (*this - rhs);
return *this;
}
/// In-place multiplication
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator*=(Quaternion<Element> const &rhs) {
*this = (*this * rhs);
return *this;
}
/// Scalar multiplication
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator*=(Element s) {
*this = (*this * s);
return *this;
}
/// In-place Division
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator/=(Quaternion<Element> const &rhs) {
*this = (*this / rhs);
return *this;
}
/// In-place Division
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator/=(Element s) {
*this = (*this / s);
return *this;
}
/// Computes a 3x3 rotation matrix (row-major representation)
CUTLASS_HOST_DEVICE
Matrix3x3<Element> as_rotation_matrix_3x3() const {
Matrix3x3<Element> m(
w() * w() + x() * x() - y() * y() - z() * z(),
2 * x() * y() - 2 * w() * z(),
2 * x() * z() + 2 * w() * y(),
2 * x() * y() + 2 * w() * z(),
w() * w() - x() * x() + y() * y() - z() * z(),
2 * y() * z() - 2 * w() * x(),
2 * x() * z() - 2 * w() * y(),
2 * y() * z() + 2 * w() * x(),
w() * w() - x() * x() - y() * y() + z() * z()
);
return m;
}
/// Computes a 4x4 rotation matrix (row-major representation)
CUTLASS_HOST_DEVICE
Matrix4x4<Element> as_rotation_matrix_4x4() const {
Matrix4x4<Element> m = Matrix4x4<Element>::identity();
m.set_slice_3x3(as_rotation_matrix_3x3());
return m;
}
};
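// Example (illustrative sketch, not part of the library): building a rotation quaternion and
// applying it to a vector. A unit quaternion q = (sin(theta/2) * axis, cos(theta/2)) rotates
// v via q * (v, 0) * conj(q), which spinor() computes without a reciprocal.
//
//   using Q = cutlass::Quaternion<float>;
//
//   Q q = Q::rotation(0.0f, 0.0f, 1.0f, 3.14159265f / 2);   // 90-degree rotation about +z
//   cutlass::Matrix3x1<float> v(1.0f, 0.0f, 0.0f);
//   cutlass::Matrix3x1<float> r = q.spinor(v);               // approximately (0, 1, 0)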
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructs a quaternion that is non-zero only in its real element.
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> make_Quaternion(
Element w) { ///< real part
return Quaternion<Element>(w);
}
/// Constructs a quaternion from a vector and real
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> make_Quaternion(
Matrix3x1<Element> const &imag, ///< imaginary part as a vector
Element w) { ///< real part
return Quaternion<Element>(imag, w);
}
/// Constructs a quaternion from a unit-length rotation axis and a rotation
/// angle in radians
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> make_QuaternionRotation(
Matrix3x1<Element> const &axis_unit, ///< rotation axis (unit-length)
Element w) { ///< rotation angle in radians
return Quaternion<Element>::rotation(axis_unit, w);
}
/// Constructs a quaternion q = xi + yj + zk + w
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> make_Quaternion(Element x, Element y, Element z, Element w) {
return Quaternion<Element>(x, y, z, w);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the real part of the quaternion number
template <typename Element>
CUTLASS_HOST_DEVICE
Element const &real(Quaternion<Element> const &q) {
return q.w();
}
/// Returns the real part of the quaternion number
template <typename Element>
CUTLASS_HOST_DEVICE
Element &real(Quaternion<Element> &q) {
return q.w();
}
/// Returns the magnitude of the quaternion number
template <typename Element>
CUTLASS_HOST_DEVICE
Element abs(Quaternion<Element> const &q) {
return fast_sqrt(norm(q));
}
/// Quaternion conjugate
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> conj(Quaternion<Element> const &q) {
return make_Quaternion(
-q.x(),
-q.y(),
-q.z(),
q.w()
);
}
/// Computes the squared magnitude of the quaternion
template <typename Element>
CUTLASS_HOST_DEVICE
Element norm(Quaternion<Element> const &q) {
return q.x() * q.x() + q.y() * q.y() + q.z() * q.z() + q.w() * q.w();
}
/// Quaternion reciprocal
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> reciprocal(Quaternion<Element> const &q) {
Element nsq = norm(q);
return make_Quaternion(
-q.x() / nsq,
-q.y() / nsq,
-q.z() / nsq,
q.w() / nsq
);
}
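// Example (illustrative sketch, not part of the library): reciprocal(q) equals conj(q) / norm(q),
// so q * reciprocal(q) is the identity quaternion (0, 0, 0, 1) up to rounding.
//
//   auto q = cutlass::make_Quaternion(1.0f, 2.0f, 3.0f, 4.0f);
//   auto identity = q * cutlass::reciprocal(q);   // approximately 0i + 0j + 0k + 1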
/// Returns a unit-length quaternion
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> unit(Quaternion<Element> const &q) {
Element rcp_mag = Element(1) / abs(q);
return make_Quaternion(
q.x() * rcp_mag,
q.y() * rcp_mag,
q.z() * rcp_mag,
q.w() * rcp_mag
);
}
/// Quaternion exponential
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> exp(Quaternion<Element> const &q) {
Element exp_ = fast_exp(q.w());
Element imag_norm = fast_sqrt(q.x() * q.x() + q.y() * q.y() + q.z() * q.z());
Element sin_norm = fast_sin(imag_norm);
return make_Quaternion(
exp_ * q.x() * sin_norm / imag_norm,
exp_ * q.y() * sin_norm / imag_norm,
exp_ * q.z() * sin_norm / imag_norm,
exp_ * fast_cos(imag_norm)
);
}
/// Quaternion natural logarithm
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> log(Quaternion<Element> const &q) {
Element v = fast_sqrt(q.x() * q.x() + q.y() * q.y() + q.z() * q.z());
Element s = fast_acos(q.w() / abs(q)) / v;
return make_Quaternion(
q.x() * s,
q.y() * s,
q.z() * s,
fast_log(q.w())
);
}
/// Gets the rotation angle from a unit-length quaternion
template <typename Element>
CUTLASS_HOST_DEVICE
Element get_rotation_angle(Quaternion<Element> const &q_unit) {
return fast_acos(q_unit.w()) * Element(2);
}
/// Gets the rotation axis from a unit-length quaternion
template <typename Element>
CUTLASS_HOST_DEVICE
Matrix3x1<Element> get_rotation_axis(Quaternion<Element> const &q_unit) {
return q_unit.pure().unit();
}
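// Example (illustrative sketch, not part of the library): get_rotation_angle() and
// get_rotation_axis() recover the parameters used to construct a rotation quaternion.
//
//   auto q = cutlass::Quaternion<float>::rotation(0.0f, 1.0f, 0.0f, 0.5f);  // 0.5 rad about +y
//   float angle = cutlass::get_rotation_angle(q);                    // approximately 0.5
//   cutlass::Matrix3x1<float> axis = cutlass::get_rotation_axis(q);  // approximately (0, 1, 0)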
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Equality operator
template <typename Element>
CUTLASS_HOST_DEVICE
bool operator==(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return lhs.x() == rhs.x() &&
lhs.y() == rhs.y() &&
lhs.z() == rhs.z() &&
lhs.w() == rhs.w();
}
/// Inequality operator
template <typename Element>
CUTLASS_HOST_DEVICE
bool operator!=(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return !(lhs == rhs);
}
/// Quaternion scalar multiplication
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator*(Quaternion<Element> q, Element s) {
return make_Quaternion(
q.x() * s,
q.y() * s,
q.z() * s,
q.w() * s
);
}
/// Quaternion scalar multiplication
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator*(Element s, Quaternion<Element> const &q) {
return make_Quaternion(
s * q.x(),
s * q.y(),
s * q.z(),
s * q.w()
);
}
/// Quaternion scalar division
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator/(Quaternion<Element> const &q, Element s) {
return make_Quaternion(
q.x() / s,
q.y() / s,
q.z() / s,
q.w() / s
);
}
/// Quaternion unary negation
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator-(Quaternion<Element> const &q) {
return make_Quaternion(
-q.x(),
-q.y(),
-q.z(),
-q.w()
);
}
/// Quaternion addition
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator+(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return make_Quaternion(
lhs.x() + rhs.x(),
lhs.y() + rhs.y(),
lhs.z() + rhs.z(),
lhs.w() + rhs.w()
);
}
/// Quaternion subtraction
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator-(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return make_Quaternion(
lhs.x() - rhs.x(),
lhs.y() - rhs.y(),
lhs.z() - rhs.z(),
lhs.w() - rhs.w()
);
}
/// Quaternion product
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator*(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return make_Quaternion(
lhs.w() * rhs.x() + rhs.w() * lhs.x() + lhs.y() * rhs.z() - lhs.z() * rhs.y(),
lhs.w() * rhs.y() + rhs.w() * lhs.y() + lhs.z() * rhs.x() - lhs.x() * rhs.z(),
lhs.w() * rhs.z() + rhs.w() * lhs.z() + lhs.x() * rhs.y() - lhs.y() * rhs.x(),
lhs.w() * rhs.w() - lhs.x() * rhs.x() - lhs.y() * rhs.y() - lhs.z() * rhs.z()
);
}
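// Example (illustrative sketch, not part of the library): the Hamilton product is
// non-commutative; i * j = k while j * i = -k.
//
//   auto i = cutlass::make_Quaternion(1.0f, 0.0f, 0.0f, 0.0f);   // i
//   auto j = cutlass::make_Quaternion(0.0f, 1.0f, 0.0f, 0.0f);   // j
//   auto k = i * j;                                              // (0, 0, 1, 0), i.e. k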
/// Quaternion division
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator/(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return lhs * reciprocal(rhs);
}
/// Quaternion scalar division
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator/(Element s, Quaternion<Element> const &q) {
return s * reciprocal(q);
}
/// Comparison
template <typename Element>
CUTLASS_HOST_DEVICE
bool operator<(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return true;
}
/// Rotates a 3-vector assuming this is a unit quaternion (a spinor). This avoids computing
/// a reciprocal.
template <typename Element>
CUTLASS_HOST_DEVICE
Matrix3x1<Element> spinor_rotation(
Quaternion<Element> const &spinor, /// unit-length quaternion
Matrix3x1<Element> const &rhs) { /// arbitrary 3-vector
return (spinor * Quaternion<Element>(rhs, 0) * conj(spinor)).pure();
}
/// Inverse rotation of 3-vector assuming this is a unit quaternion (a spinor). This avoids computing
/// a reciprocal.
template <typename Element>
CUTLASS_HOST_DEVICE
Matrix3x1<Element> spinor_rotation_inv(
Quaternion<Element> const &spinor, /// unit-length quaternion
Matrix3x1<Element> const &rhs) { /// arbitrary 3-vector
return (conj(spinor) * Quaternion<Element>(rhs, 0) * spinor).pure();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Quaternion-valued type.
template <typename T>
struct RealType< Quaternion<T> > {
using Type = T;
/// Number of elements
static int const kExtent = Quaternion<T>::kExtent;
CUTLASS_HOST_DEVICE
static Quaternion<T> from_real(double x) {
return Quaternion<T>(static_cast<T>(x));
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// Factories
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
CUTLASS_HOST_DEVICE
cutlass::Quaternion<half_t> from_real<cutlass::Quaternion<half_t> >(double r) {
return cutlass::Quaternion<half_t>(half_t(r));
}
template <>
CUTLASS_HOST_DEVICE
cutlass::Quaternion<float> from_real<cutlass::Quaternion<float> >(double r) {
return cutlass::Quaternion<float>(float(r));
}
template <>
CUTLASS_HOST_DEVICE
cutlass::Quaternion<double> from_real<cutlass::Quaternion<double> >(double r) {
return cutlass::Quaternion<double>(r);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// functional.h numeric specializations
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct multiplies<Quaternion<T>> {
CUTLASS_HOST_DEVICE
Quaternion<T> operator()(Quaternion<T> lhs, Quaternion<T> const &rhs) const {
lhs = lhs * rhs;
return lhs;
}
};
/// Squares with optional conversion
template <typename T, typename Output>
struct magnitude_squared<Quaternion<T>, Output> {
CUTLASS_HOST_DEVICE
Output operator()(Quaternion<T> lhs) const {
multiplies<Output> mul_op;
Output y_w = Output(lhs.w());
Output y_x = Output(lhs.x());
Output y_y = Output(lhs.y());
Output y_z = Output(lhs.z());
return mul_op(y_w, y_w) + mul_op(y_x, y_x) + mul_op(y_y, y_y) + \
mul_op(y_z, y_z);
}
};
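// Example (illustrative sketch, not part of the library): these functional specializations let
// generic numeric code treat quaternions like scalars.
//
//   cutlass::multiplies<cutlass::Quaternion<float>> mul;
//   cutlass::magnitude_squared<cutlass::Quaternion<float>, float> mag2;
//
//   auto q = cutlass::make_Quaternion(1.0f, 0.0f, 0.0f, 1.0f);
//   float m = mag2(mul(q, q));   // |q * q|^2 = |q|^4 = 4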
template <typename T>
struct multiply_add<Quaternion<T>, Quaternion<T>, Quaternion<T>> {
CUTLASS_HOST_DEVICE
Quaternion<T> operator()(
Quaternion<T> const &a,
Quaternion<T> const &b,
Quaternion<T> const &c) const {
T x = c.x();
T y = c.y();
T z = c.z();
T w = c.w();
x += a.w() * b.x();
x += b.w() * a.x();
x += a.y() * b.z();
x += -a.z() * b.y();
y += a.w() * b.y();
y += b.w() * a.y();
y += a.z() * b.x();
y += -a.x() * b.z();
z += a.w() * b.z();
z += b.w() * a.z();
z += a.x() * b.y();
z += -a.y() * b.x();
w += a.w() * b.w();
w += -a.x() * b.x();
w += -a.y() * b.y();
w += -a.z() * b.z();
return cutlass::make_Quaternion(x, y, z, w);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// [end of include/cutlass/quaternion.h]
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a canonical coordinate for rank=4 tensors offering named indices.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a canonical 4D coordinate used by tensor operations.
struct Tensor4DCoord : public Coord<4> {
/// Base class
using Base = Coord<4>;
/// Index type
using Index = typename Base::Index;
/// LongIndex type
using LongIndex = typename Base::LongIndex;
/// Batch dimension
static int const kN = 0;
/// Height dimension
static int const kH = 1;
/// Width dimension
static int const kW = 2;
/// Channels dimension
static int const kC = 3;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Tensor4DCoord() { }
/// Constructs from Coord<4>
CUTLASS_HOST_DEVICE
Tensor4DCoord(Coord<4> const &coord): Base(coord) { }
/// Helper to construct from N, H, W, and C.
CUTLASS_HOST_DEVICE
Tensor4DCoord(Index n, Index h, Index w, Index c): Base(make_Coord(n, h, w, c)) { }
/// Helper to construct from N, H, W, and C, which are LongIndex type
CUTLASS_HOST_DEVICE
Tensor4DCoord(LongIndex n, LongIndex h, LongIndex w, LongIndex c)
: Base(make_Coord(Index(n), Index(h), Index(w), Index(c))) { }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index const & n() const { return this->at(kN); }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index & n() { return this->at(kN); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index const & h() const { return this->at(kH); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index & h() { return this->at(kH); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index const & w() const { return this->at(kW); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index & w() { return this->at(kW); }
/// Returns the channel of the coordinate
CUTLASS_HOST_DEVICE
Index const & c() const { return this->at(kC); }
/// Returns the channel of the coordinate
CUTLASS_HOST_DEVICE
Index & c() { return this->at(kC); }
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
Tensor4DCoord operator+(Base const& b) const {
return Tensor4DCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
Tensor4DCoord operator-(Base const& b) const {
return Tensor4DCoord(Base::operator-(b));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
Tensor4DCoord operator*(Base const& b) const {
return Tensor4DCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
Tensor4DCoord operator/(Base const& b) const {
return Tensor4DCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
Tensor4DCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
Tensor4DCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
Tensor4DCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
Tensor4DCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
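// Example (illustrative sketch, not part of the library): Tensor4DCoord adds named accessors
// over Coord<4> and supports element-wise arithmetic.
//
//   cutlass::Tensor4DCoord extent(2, 8, 8, 64);       // (N, H, W, C)
//   cutlass::Tensor4DCoord offset(0, 1, 1, 0);
//   cutlass::Tensor4DCoord coord = extent - offset;   // (2, 7, 7, 64)
//   int channels = extent.c();                        // 64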
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a canonical 5D coordinate used by tensor operations.
struct Tensor5DCoord : public Coord<5> {
/// Base class
using Base = Coord<5>;
/// Index type
using Index = typename Base::Index;
/// LongIndex type
using LongIndex = typename Base::LongIndex;
/// Batch dimension
static int const kN = 0;
/// Depth dimension
static int const kD = 1;
/// Height dimension
static int const kH = 2;
/// Width dimension
static int const kW = 3;
/// Channels dimension
static int const kC = 4;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Tensor5DCoord() { }
/// Constructs from Coord<5>
CUTLASS_HOST_DEVICE
Tensor5DCoord(Coord<5> const &coord): Base(coord) { }
/// Helper to construct from N, D, H, W, and C.
CUTLASS_HOST_DEVICE
Tensor5DCoord(Index n, Index d, Index h, Index w, Index c): Base(make_Coord(n, d, h, w, c)) { }
/// Helper to construct from N, D, H, W, and C, which are LongIndex type
CUTLASS_HOST_DEVICE
Tensor5DCoord(LongIndex n, LongIndex d, LongIndex h, LongIndex w, LongIndex c)
: Base(make_Coord(Index(n), Index(d), Index(h), Index(w), Index(c))) { }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index const & n() const { return this->at(kN); }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index & n() { return this->at(kN); }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index const & d() const { return this->at(kD); }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index & d() { return this->at(kD); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index const & h() const { return this->at(kH); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index & h() { return this->at(kH); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index const & w() const { return this->at(kW); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index & w() { return this->at(kW); }
/// Returns the channel of the coordinate
CUTLASS_HOST_DEVICE
Index const & c() const { return this->at(kC); }
/// Returns the channel of the coordinate
CUTLASS_HOST_DEVICE
Index & c() { return this->at(kC); }
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
Tensor5DCoord operator+(Base const& b) const {
return Tensor5DCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
Tensor5DCoord operator-(Base const& b) const {
return Tensor5DCoord(Base::operator-(b));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
Tensor5DCoord operator*(Base const& b) const {
return Tensor5DCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
Tensor5DCoord operator/(Base const& b) const {
return Tensor5DCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
Tensor5DCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
Tensor5DCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
Tensor5DCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
Tensor5DCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/tensor_coord.h/0 | {
"file_path": "include/cutlass/tensor_coord.h",
"repo_id": "include",
"token_count": 2990
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Ell iterator for Blocked-Ell matrix (ellValue matrix) used with EllMmaPipelined
*/
#pragma once
#include "cutlass/arch/memory.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h"
#include "cutlass/transform/threadblock/ell_predicated_tile_access_iterator.h"
#include "cutlass/transform/threadblock/ell_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// EllPredicatedTileIterator
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
/// Regular tile iterator using a precomputed control structure to minimize register liveness
/// and integer arithmetic.
///
/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed.
///
/// Base pointer and tensor extents may be specified at the time the iterator is constructed.
/// Subsequently, they are assumed to be immutable.
///
/// Adding a logical coordinate offset may be performed at the time the iterator is constructed.
/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive.
///
/// Visitation order is intended to first visit a "residual" tile that may be partially full in
/// both the advance dimension and the steady-state dimension. This is assumed to be the last
/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to
/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent
/// accesses may be performed without updating internal predicates and are efficient in terms of
/// live register state and pointer arithmetic instructions.
///
/// To be efficient, this assumes the iterator will be dereferenced and advanced at least once
/// outside any looping structure to minimize integer arithmetic.
///
/// Accesses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing
/// the iterator.
///
///
/// Example:
///
/// An efficient pipeline structure may be constructed as follows:
///
// template <typename Iterator>
// __global__ void kernel(
// typename Iterator::Params params,
// typename Iterator::Element *ptr,
// TensorCoord extent) {
//
// typename Iterator::Fragment fragment;
//
// TensorCoord threadblock_offset(0, 0);
//
// Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offset);
//
//
// fragment = *iter; // load "residue" tile first
// ++iter; // advance to first "steady state" tile and update internal masks
//
//
// #pragma unroll
// for (int i = Remaining - 1; i >= 0; --i) {
//
// f(fragment);
//
// if (!i) {
// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs.
// }
//
// fragment = *iter; // load tile during "steady state" phase
// ++iter; // advance to next tile - lightweight due to steady-state masks
// }
// }
//
// void host(TensorView<Element, 2, layout::PitchLinear> view) {
//
// using Iterator = transform::threadblock::EllPredicatedTileIterator;
//
// typename Iterator::Params params(view.layout());
//
// kernel<Iterator>(params, view.data());
// }
///
///
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
int AccessSize = ThreadMap::kElementsPerAccess
>
class EllPredicatedTileIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for pitch-linear data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int AccessSize>
class EllPredicatedTileIterator<Shape_, Element_, layout::PitchLinear, AdvanceRank,
ThreadMap_, AccessSize> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
/// Type used for internal memory accesses
using AccessType = AlignedArray<Element, AccessSize, (AccessSize * sizeof_bits<Element>::value / 8)>;
/// Underlying iterator to compute the addresses
using TileAccessIterator =
EllPredicatedTileAccessIterator<Shape, Element, Layout, kAdvanceRank,
ThreadMap, AccessType>;
static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename TileAccessIterator::Mask;
/// Iterator for ELL storage
using EllIterator = typename cutlass::transform::threadblock::ell::Iterator;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend EllPredicatedTileIterator;
private:
/// Parameters object
typename TileAccessIterator::Params params_;
public:
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : params_(layout) { }
CUTLASS_HOST_DEVICE
Params() { }
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: address_iterator_(params.params_, pointer, extent, thread_id,
threadblock_offset) {}
/// Construct a EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
if (kAdvanceRank)
address_iterator_.add_tile_offset({0, 1});
else
address_iterator_.add_tile_offset({1, 0});
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return address_iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { address_iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { address_iterator_.get_mask(mask); }
/// add mask for small tiles in ELL
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) { address_iterator_.ell_add_mask(blocksize); }
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
address_iterator_.set_iteration_index(idx);
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + byte_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
cutlass::arch::global_load<AccessType,
sizeof(AccessType)
>(
frag_ptr[idx], access_ptr, address_iterator_.valid());
++address_iterator_;
}
}
}
}
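  // Note (illustrative, not part of the library): the loop above packs accesses into the
  // fragment in vector-major order, idx = v + kAccessesPerVector * (c + s * Iterations::kContiguous).
  // Assuming, for example, Iterations::kContiguous == 2 and kAccessesPerVector == 2:
  //   (s,c,v) = (0,0,0) -> 0, (0,0,1) -> 1, (0,1,0) -> 2, (0,1,1) -> 3, (1,0,0) -> 4, ...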
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_byte_offset(frag, 0); }
  /// Loads a fragment from memory using per-access offsets from the ELL block index
  CUTLASS_DEVICE
  void load_with_ell_index(Fragment &frag, EllIterator &ell_iter) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
address_iterator_.set_iteration_index(idx);
LongIndex ell_offset = 0;
int k_offset = address_iterator_.get_k();
ell_offset = ell_iter.get_offset(k_offset) * sizeof(Element);
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + ell_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
bool is_valid = address_iterator_.valid();
is_valid = is_valid && (ell_offset >= 0);
cutlass::arch::global_load<AccessType,
sizeof(AccessType)
>(
frag_ptr[idx], access_ptr, is_valid);
++address_iterator_;
}
}
}
}
  /// Loads a fragment from memory using a single ELL offset shared by the whole tile
  CUTLASS_DEVICE
  void load_with_ell_index_fast(Fragment &frag, EllIterator &ell_iter) {
LongIndex ell_offset = ell_iter.get_offset_fast() * sizeof(Element);
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
address_iterator_.set_iteration_index(idx);
char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + ell_offset;
AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr);
bool is_valid = address_iterator_.valid();
is_valid = is_valid && (ell_offset >= 0);
cutlass::arch::global_load<AccessType,
sizeof(AccessType)
>(
frag_ptr[idx], access_ptr, is_valid);
++address_iterator_;
}
}
}
}
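  // Usage sketch (illustrative only): choosing between the two ELL load paths. The predicate
  // `offset_is_uniform` is hypothetical; `ell_iter` is assumed to be positioned at the
  // current ELL block.
  //
  //   if (offset_is_uniform) {
  //     tile_iterator.load_with_ell_index_fast(frag, ell_iter);  // one offset for the whole tile
  //   } else {
  //     tile_iterator.load_with_ell_index(frag, ell_iter);       // per-access offset via get_offset(k)
  //   }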
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
address_iterator_.set_iteration_index(0);
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < kAccessesPerVector; ++v) {
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset;
AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr);
if (address_iterator_.valid()) {
*access_ptr = frag_ptr[idx];
}
++address_iterator_;
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_byte_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int AccessSize
>
class EllPredicatedTileIterator<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, AccessSize> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
AccessSize
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Iterator for ELL storage
using EllIterator = typename cutlass::transform::threadblock::ell::Iterator;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {
}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset ///< Initial offset of threadblock
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())
) { }
  /// Construct an EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): EllPredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
  /// Adds a mask for small tiles in ELL format
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void load_with_ell_index(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index(frag, ell_iter);
}
CUTLASS_DEVICE
void load_with_ell_index_fast(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index_fast(frag, ell_iter);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
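////////////////////////////////////////////////////////////////////////////////
// Note (illustrative, not part of the library): the column-major specialization above only
// adapts coordinates and delegates to the pitch-linear iterator, mapping (row, column) to
// (contiguous, strided). Assuming, for example, a column-major extent of (128, 64), the
// underlying iterator sees PitchLinearCoord(128, 64); the row-major specialization below
// swaps the two coordinates instead.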
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
int AccessSize
>
class EllPredicatedTileIterator<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, AccessSize> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessSize
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Iterator for ELL storage
using EllIterator = typename cutlass::transform::threadblock::ell::Iterator;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {
    }
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset ///< Initial offset of threadblock
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())
) { }
  /// Construct an EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): EllPredicatedTileIterator(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
  /// Adds a mask for small tiles in ELL format
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) {
iterator_.ell_add_mask(blocksize);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment &frag, LongIndex byte_offset) {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void load_with_ell_index(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index(frag, ell_iter);
}
CUTLASS_DEVICE
void load_with_ell_index_fast(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index_fast(frag, ell_iter);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const &frag, LongIndex byte_offset) {
iterator_.store_with_byte_offset(frag, byte_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for column-major interleaved data. It is mapped
/// to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int AccessSize, int InterleavedK>
class EllPredicatedTileIterator<Shape_, Element_,
layout::ColumnMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessSize> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileIterator<
layout::PitchLinearShape<Shape::kRow * kInterleavedK,
Shape::kColumn / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 0 : 1), ThreadMap, AccessSize>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Iterator for ELL storage
using EllIterator = typename cutlass::transform::threadblock::ell::Iterator;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.row() * kInterleavedK,
extent.column() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.row() * kInterleavedK,
threadblock_offset.column() / kInterleavedK)) {}
  /// Construct an EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
  /// Adds a mask for small tiles in ELL format
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) { iterator_.ell_add_mask(blocksize); }
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
CUTLASS_DEVICE
void load_with_ell_index(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index(frag, ell_iter);
}
CUTLASS_DEVICE
void load_with_ell_index_fast(Fragment &frag, EllIterator& ell_iter) {
iterator_.load_with_ell_index_fast(frag, ell_iter);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
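////////////////////////////////////////////////////////////////////////////////
// Note (illustrative, not part of the library): the interleaved specializations fold the
// interleaving factor into the pitch-linear extent. Assuming, for example, InterleavedK = 32
// and a column-major-interleaved extent of (row = 128, column = 64), the underlying iterator
// sees PitchLinearCoord(128 * 32, 64 / 32) = (4096, 2); threadblock offsets are scaled the
// same way.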
////////////////////////////////////////////////////////////////////////////////
/// Specialization of EllPredicatedTileIterator for row-major interleaved data. It is
/// mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int AccessSize, int InterleavedK>
class EllPredicatedTileIterator<Shape_, Element_,
layout::RowMajorInterleaved<InterleavedK>,
AdvanceRank, ThreadMap_, AccessSize> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::RowMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = EllPredicatedTileIterator<
layout::PitchLinearShape<Shape::kColumn * kInterleavedK,
Shape::kRow / kInterleavedK>,
Element, layout::PitchLinear, (kAdvanceRank == 0 ? 1 : 0), ThreadMap, AccessSize>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::kElementsPerAccess>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend EllPredicatedTileIterator;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() {}
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout)
: params_(layout::PitchLinear(layout.stride(0))) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(params.params_, pointer,
layout::PitchLinearCoord(extent.column() * kInterleavedK,
extent.row() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(
threadblock_offset.column() * kInterleavedK,
threadblock_offset.row() / kInterleavedK)) {}
  /// Construct an EllPredicatedTileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: EllPredicatedTileIterator(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
EllPredicatedTileIterator operator++(int) {
EllPredicatedTileIterator self(*this);
operator++();
return self;
}
/// Returns a stride
CUTLASS_HOST_DEVICE
int get_stride() const { return iterator_.get_stride(); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { iterator_.get_mask(mask); }
  /// Adds a mask for small tiles in ELL format
CUTLASS_HOST_DEVICE
void ell_add_mask(int blocksize) { iterator_.ell_add_mask(blocksize); }
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/threadblock/ell_predicated_tile_iterator.h/0 | {
"file_path": "include/cutlass/transform/threadblock/ell_predicated_tile_iterator.h",
"repo_id": "include",
"token_count": 15559
} | 39 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates implementing the computation of addresses for storing tiles
    from pitch-linear rank=2 tensors.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCongruous64b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicandCongruous64b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
    /// This iterator is specialized for an access size that is 64 bits in
    /// length.
static int const kAccessSizeInBits = 64;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 64b");
///< Number of pointers
static int const kPointerCount = 1;
};
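  // Note (illustrative, not part of the library): the static_assert above pins the access
  // width to 64 bits. Assuming Element = double (64 bits), ThreadMap::kElementsPerAccess
  // must be 1; a hypothetical 32-bit element would instead require 2 elements per access to
  // satisfy sizeof_bits<Element>::value * kElementsPerAccess == 64.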
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous +
coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess);
}
};
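////////////////////////////////////////////////////////////////////////////////
// Note (illustrative, not part of the library): add_tile_offset() above converts a whole-tile
// coordinate into an element offset. Assuming, for example, Shape::kContiguous = 64,
// Shape::kStrided = 8, stride_ = 128 and Layout::kElementsPerAccess = 1, a tile offset of
// (contiguous = 1, strided = 2) advances the pointer by 1 * 64 + 2 * 8 * 128 * 1 = 2112 elements.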
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCongruous64b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCongruous64b,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCongruous64b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCongruous64b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCongruous64b,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for crosswise arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicand64bCrosswise,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicand64bCrosswise;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
    /// This iterator is specialized for an access size that is 64 bits in
    /// length.
static int const kAccessSizeInBits = 64;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 64b");
///< Number of pointers - two pointers are needed if making more than 4 iterations along
///< strided dimension
static int const kPointerCount = (ThreadMap::Iterations::kStrided > 4 ? 2 : 1);
};
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_[Detail::kPointerCount];
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / ThreadMap::kElementsPerAccess) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data());
byte_offset_[0] = ref.offset(thread_offset_in_threadblock_tile) * sizeof(Element);
if (Detail::kPointerCount == 2) {
byte_offset_[1] = byte_offset_[0] ^ 8;
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset / ThreadMap::kElementsPerAccess;
}
/// Returns a pointer
CUTLASS_DEVICE
AccessType *get() const {
// Map the logical contiguous and strided access to the internal swizzled structure.
int uniform_offset = (iteration_strided_ & 0x3) * stride_ + (iteration_strided_ >> 3) * 16 + stride_ * ThreadMap::Delta::kContiguous * iteration_contiguous_;
char *access_byte_ptr = reinterpret_cast<char *>(pointer_ + uniform_offset);
int byte_offset;
// This iterator may require two byte offsets if it must load more than 8 rows (or 2 iterations)
// in the strided dimension
if (Detail::kPointerCount == 2 && (iteration_strided_ & 0x4)) {
byte_offset = byte_offset_[1];
}
else {
byte_offset = byte_offset_[0];
}
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(coord.strided() * Shape::kStrided + coord.contiguous() * Shape::kContiguous * stride_);
}
};
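////////////////////////////////////////////////////////////////////////////////
// Note (illustrative, not part of the library): when more than four strided iterations are
// required, the crosswise iterator above keeps a second byte offset obtained by XOR-ing the
// first with 8 bytes (one 64-bit access):
//   byte_offset_[1] = byte_offset_[0] ^ 8;
// get() then selects between the two using bit 2 of the strided iteration index
// (iteration_strided_ & 0x4).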
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicand64bCrosswise,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicand64bCrosswise,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicand64bCrosswise,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicand64bCrosswise;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicand64bCrosswise,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCongruous128b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicandCongruous128b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128b");
///< Number of pointers
static int const kPointerCount = 1;
};
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
int access_offset = iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
iteration_contiguous_ * ThreadMap::Delta::kContiguous /
ThreadMap::kElementsPerAccess;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous +
coord.strided() * Shape::kStrided * stride_ * Layout::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCongruous128b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous128b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCongruous128b,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCongruous128b,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCongruous128b;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCongruous128b,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tile iterator specialized for crosswise arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::TensorOpMultiplicandCrosswise128x4,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorOpMultiplicandCrosswise128x4;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using StrideIndex = typename Layout::Stride::Index;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
static_assert(ThreadMap::kThreads / 32 > 1,
"This tile iterator requires at least two warps.");
/// Internal details made public to facilitate introspection
struct Detail {
/// This iterator is specialized for an access size that is 128 bits in
/// length.
static int const kAccessSizeInBits = 128;
static_assert(sizeof_bits<Element_>::value *
ThreadMap::kElementsPerAccess ==
kAccessSizeInBits,
"This iterator requires a policy whose access size is 128b");
///< Number of pointers
static int const kPointerCount = 1;
};
  static_assert(!(ThreadMap::Iterations::kStrided % 2), "This iterator requires an even number of iterations along the strided dimension");
/// Element type per access
using AccessType = Array<Element, Layout::kElementsPerAccess>;
private:
//
// Data members
//
/// Stride value
StrideIndex stride_;
/// Internal pointer to first access of tile
AccessType *pointer_;
/// Internal byte offset
Index byte_offset_;
/// Iteration in the contiguous dimension
int iteration_contiguous_;
/// Iteration in the strided dimension
int iteration_strided_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
stride_(ref.stride(0) / Layout::kElementsPerAccess),
byte_offset_(0) {
layout::PitchLinearCoord thread_offset_base = ThreadMap::initial_offset(thread_id);
// This is the offset of a thread within a threadblock tile for a specific
// pointer (units of elements)
layout::PitchLinearCoord thread_offset_in_threadblock_tile = thread_offset_base;
// initialize pointer
pointer_ = reinterpret_cast<AccessType *>(ref.data() + ref.offset(thread_offset_in_threadblock_tile));
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_offset_ += pointer_offset * sizeof(Element);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
AccessType *access_ptr = pointer_;
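    // Offsets are expressed in units of AccessType: odd strided iterations add two
    // extra units of offset_c (each unit spans stride_ accesses), and every pair of
    // strided iterations advances offset_s by eight accesses, matching the 128x4
    // crosswise arrangement.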
int offset_c = (iteration_contiguous_ * ThreadMap::Delta::kContiguous + (iteration_strided_ & 1) * 2);
int offset_s = (iteration_strided_ / 2) * 8;
int access_offset = offset_c * stride_ + offset_s;
char *access_byte_ptr =
reinterpret_cast<char *>(access_ptr + access_offset);
return reinterpret_cast<AccessType *>(access_byte_ptr + byte_offset_);
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
return *this;
    // Enter here only if (iteration_contiguous_ ==
    // ThreadMap::Iterations::kContiguous)
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
    // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided),
    // which means we enter the next tile.
iteration_strided_ = 0;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
this->operator++();
return prev;
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
add_pointer_offset(
coord.contiguous() * Shape::kContiguous * stride_ +
coord.strided() * Shape::kStrided * Layout::kElementsPerAccess);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
Shape_, Element_,
layout::ColumnMajorTensorOpMultiplicandCrosswise128x4,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for column-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
layout::TensorOpMultiplicandCrosswise128x4,
(kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
)
: iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.row(), coord.column()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
layout::RowMajorTensorOpMultiplicandCrosswise128x4,
AdvanceRank, ThreadMap_, Alignment> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for row-major iterator may along advance along the "
"columns(rank=0) or rows(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajorTensorOpMultiplicandCrosswise128x4;
static int const kAdvanceRank = AdvanceRank;
static int const kAlignment = Alignment;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ThreadMap = ThreadMap_;
/// Underlying iterator type
using UnderlyingIterator = RegularTileAccessIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::TensorOpMultiplicandCrosswise128x4,
(kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;
using AccessType = typename UnderlyingIterator::AccessType;
private:
/// Underlying iterator
UnderlyingIterator iterator_;
public:
/// Construct a TileIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
RegularTileAccessIterator(
TensorRef ref, ///< Pointer to start of tensor
int thread_id ///< ID of each participating thread
):
iterator_({ref.data(), ref.stride()}, thread_id) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Adds a tile offset
CUTLASS_DEVICE
void add_tile_offset(TensorCoord const &coord) {
iterator_.add_tile_offset({coord.column(), coord.row()});
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
CUTLASS_HOST_DEVICE
RegularTileAccessIterator operator++(int) {
RegularTileAccessIterator prev(*this);
++iterator_;
return prev;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h/0 | {
"file_path": "include/cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h",
"repo_id": "include",
"token_count": 15729
} | 40 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from math import prod
from typing import Union
from cuda import cuda, cudart
import numpy as np
import cutlass
from cutlass.backend.frontend import CupyFrontend, NumpyFrontend, TorchFrontend
from cutlass.backend.memory_manager import DevicePtrWrapper
from cutlass.utils.datatypes import is_cupy_tensor, is_numpy_tensor, is_torch_tensor
class ArgumentBase:
"""
Base class for operation arguments
"""
def __init__(
self,
A: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]",
B: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]",
C: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]",
D: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]",
**kwargs,
) -> None:
        # tensor_C is interpreted as the bias when bias=True is passed in the keyword arguments
self.bias = kwargs.get("bias", False)
self.stream = kwargs.get("stream", cuda.CUstream(0))
# RMM buffers used to track tensor lifetime
self.buffers = {}
# Host tensor to copy the computed result back
self.host_tensors = {}
self.ptr_A = self.tensor_to_ptr(A, "A")
self.ptr_B = self.tensor_to_ptr(B, "B")
self.ptr_C = self.tensor_to_ptr(C, "C")
self.ptr_D = self.tensor_to_ptr(D, "D", is_output=True)
if C is not None:
if not isinstance(C, cuda.CUdeviceptr):
self.tensor_c_numel = prod(C.shape)
def tensor_to_ptr(self, tensor, name, is_output=False):
"""
        Convert the input tensor to a cuda.CUdeviceptr usable by CUDA Python and remember it.
        For numpy.ndarray outputs, the host buffer is also remembered for later synchronization.
"""
if tensor is None:
return cuda.CUdeviceptr(0)
if is_numpy_tensor(tensor):
if is_output:
assert name
self.buffers[name] = NumpyFrontend.argument(tensor, is_output)
if is_output:
self.host_tensors[name] = tensor
return self.buffers[name].ptr
elif is_torch_tensor(tensor):
return TorchFrontend.argument(tensor)
elif isinstance(tensor, cuda.CUdeviceptr):
return tensor
elif is_cupy_tensor(tensor):
return CupyFrontend.argument(tensor)
else:
raise TypeError("Unsupported Frontend. Only support numpy and torch")
def sync(self, stream_sync=True):
if stream_sync:
(err,) = cudart.cudaDeviceSynchronize()
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
for key in self.host_tensors.keys():
host_tensor = self.host_tensors[key]
(err,) = cuda.cuMemcpyDtoH(
host_tensor,
self.buffers[key].ptr,
host_tensor.size * host_tensor.itemsize,
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
self.free()
def free(self):
"""
Frees allocated device-side memory
"""
# Free any device memory allocated manually
if not cutlass.use_rmm:
for name, buf in self.buffers.items():
if isinstance(buf, DevicePtrWrapper):
err, = cudart.cudaFree(buf.ptr)
if err != cudart.cudaError_t.cudaSuccess:
raise RuntimeError(f"cudaFree failed with error {err}")
if hasattr(self, "workspace_buffer") and isinstance(self.workspace_buffer, DevicePtrWrapper):
err, = cudart.cudaFree(self.workspace_buffer.ptr)
if err != cudart.cudaError_t.cudaSuccess:
raise RuntimeError(f"cudaFree failed with error {err}")
del self.workspace_buffer
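# Usage sketch (hypothetical subclass and tensor names; a CUDA-capable device is required).
# Concrete operations derive their argument containers from ArgumentBase:
#
#   class MyGemmArguments(ArgumentBase):
#       ...
#
#   args = MyGemmArguments(A, B, C, D)   # numpy/torch/cupy tensors or cuda.CUdeviceptr
#   # ... launch the kernel using args.ptr_A, args.ptr_B, args.ptr_C, args.ptr_D ...
#   args.sync()                          # copy numpy outputs back to host and free buffers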
| python/cutlass/backend/arguments.py/0 | {
"file_path": "python/cutlass/backend/arguments.py",
"repo_id": "python",
"token_count": 2310
} | 41 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Pass manager for DAG IR.
"""
from typing import Any
import networkx as nx
from cutlass.backend.evt.ir import DAGIR
from cutlass.backend.evt.passes.util import cc_map
class EVTPassBase:
"""
Base class for EVT Passes
"""
dependencies = []
def __init__(self, dag_ir: DAGIR) -> None:
self.dag_ir = dag_ir
self.cc = self.dag_ir.cc
def requires(self) -> None:
"""
This function will be called before the pass is run.
"""
pass
def call(self) -> None:
"""
The pass that is run through the self.dag_ir
"""
raise NotImplementedError(
f"__call__ is not overwritten in Pass {self.__class__.__name__}")
def ensures(self) -> None:
"""
This function will be called after the pass is run.
"""
pass
def __call__(self) -> Any:
self.requires()
self.call()
self.ensures()
def cc_specific_method(self, func):
"""
        This enables defining functions that behave differently under different compute capabilities (cc).
        The simplest example of using this function is the following:
        .. highlight:: python
        .. code-block:: python
            class ExamplePass(EVTPassBase):
                def call(self):
                    # This automatically selects the smXX_func based on the current cc
                    self.cc_specific_method(self.func)()
                # Interface func, can be empty
                def func(self):
                    pass
                # Sm90 specific func
                def sm90_func(self):
                    # Sm90 specific method
                    return
                # Sm80 specific func
                def sm80_func(self):
                    # Sm80 specific method
                    return
"""
func_name = f"sm{cc_map[self.cc]}_{func.__name__}"
if hasattr(self, func_name):
return getattr(self, func_name)
else:
raise NotImplementedError(f"func {func.__name__} is not overwritten for Sm{self.cc}")
class EVTPassManager(nx.DiGraph):
"""
    Topological-sort-based pass manager.
    Each registered pass declares a list of dependencies. The pass manager organizes
    the passes as a DAG and launches the compiler passes in topological order.
"""
def __init__(self, dag_ir: DAGIR, pass_list):
super().__init__()
self.dag_ir = dag_ir
for pass_cls in pass_list:
self.add_pass(pass_cls)
self.sorted_passes = self.schedule()
def get_callable(self, pass_name):
"""
Return the callable of the pass
"""
return self.nodes[pass_name]["callable"]
def add_pass(self, pass_cls):
"""
Add a pass to the pass manager
:param pass_cls: the class of pass
:type pass_cls: derived class of EVTPassBase
"""
name = pass_cls.__name__
pass_callable = pass_cls(self.dag_ir)
self.add_node(name, callable=pass_callable)
def schedule(self):
"""
        Schedule the added passes in topological order
"""
# Add edges
for pass_name in self.nodes:
callable = self.get_callable(pass_name)
for dependency_cls in callable.dependencies:
self.add_edge(
dependency_cls.__name__,
type(callable).__name__)
# Topological sort
return list(nx.topological_sort(self))
def __call__(self) -> Any:
"""
Launch the registered passes
"""
for pass_name in self.sorted_passes:
callable = self.get_callable(pass_name)
callable()
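if __name__ == "__main__":
    # Minimal sketch of how passes declare dependencies. The pass names below are
    # hypothetical, and a real DAGIR instance is still required to run the manager.
    class NormalizePass(EVTPassBase):
        def call(self):
            pass
    class FusionPass(EVTPassBase):
        dependencies = [NormalizePass]
        def call(self):
            pass
    # EVTPassManager(dag_ir, [FusionPass, NormalizePass])() would launch NormalizePass
    # before FusionPass because of the declared dependency.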
| python/cutlass/backend/evt/passes/pass_manager.py/0 | {
"file_path": "python/cutlass/backend/evt/passes/pass_manager.py",
"repo_id": "python",
"token_count": 2156
} | 42 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Common utilities for emitting CUTLASS kernels
"""
import cutlass
# Strings used for printing information about the generation of emitted scripts
_AUTOGEN_STR = f"This file was automatically generated by the CUTLASS {cutlass.__version__} Python interface (https://github.com/nvidia/cutlass/python)"
_CSTYLE_AUTOGEN_COMMENT = f"""// {_AUTOGEN_STR}
"""
_PYSTYLE_AUTOGEN_COMMENT = f"""# {_AUTOGEN_STR}
"""
_CUTLASS_KERNEL_ARGS_2x = """
typename DeviceKernel::Arguments arguments {
cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, K}, // problem size
1,
{alpha, beta},
A, B, C, D,
0, 0, 0, 0, // batch strides
DeviceKernel::LayoutA::packed({M, K}).stride(0), // lda
DeviceKernel::LayoutB::packed({K, N}).stride(0), // ldb
DeviceKernel::LayoutC::packed({M, N}).stride(0), // ldc
DeviceKernel::LayoutC::packed({M, N}).stride(0) // ldd
};
"""
_CUTLASS_KERNEL_ARGS_2x_STREAM_K = """
typename DeviceKernel::Arguments arguments {
cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, K}, // problem size
1,
{alpha, beta},
A, B, C, D,
0, 0, 0, 0, // batch strides
DeviceKernel::LayoutA::packed({M, K}).stride(0), // lda
DeviceKernel::LayoutB::packed({K, N}).stride(0), // ldb
DeviceKernel::LayoutC::packed({M, N}).stride(0), // ldc
DeviceKernel::LayoutC::packed({M, N}).stride(0), // ldd
-1 // avail_sms
};
"""
_CUTLASS_KERNEL_RUN_GEMM_2x = """
using ElementCompute = typename DeviceKernel::EpilogueOutputOp::ElementCompute;
cutlass::Status ${name}_kernel_run(int M, int N, int K,
const DeviceKernel::ElementA* A, const DeviceKernel::ElementB* B, const DeviceKernel::ElementC* C, DeviceKernel::ElementC* D,
ElementCompute alpha, ElementCompute beta) {
${args}
size_t workspace_size = DeviceKernel::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
DeviceKernel gemm_op;
cutlass::Status status = gemm_op.initialize(arguments,
workspace.get(),
nullptr); // CUDA stream
if (status != cutlass::Status::kSuccess) {
return status;
}
status = gemm_op();
return status;
}
"""
_CUTLASS_KERNEL_RUN_GEMM_3x = """
using StrideA = typename DeviceKernel::GemmKernel::StrideA;
using StrideB = typename DeviceKernel::GemmKernel::StrideB;
using StrideC = typename DeviceKernel::GemmKernel::StrideC;
using StrideD = typename DeviceKernel::GemmKernel::StrideD;
using ElementCompute = typename DeviceKernel::EpilogueOutputOp::ElementCompute;
cutlass::Status ${name}_kernel_run(
int M, int N, int K, int L,
const DeviceKernel::ElementA* A, const DeviceKernel::ElementB* B, const DeviceKernel::ElementC* C, DeviceKernel::ElementC* D,
ElementCompute alpha, ElementCompute beta, const cutlass::KernelHardwareInfo& hw_info) {
typename DeviceKernel::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, K, L}, // problem size
{
A, // ptrA
cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, L)), // stride A
B, // ptrB
cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, L)), // stride B
},
{
{alpha, beta},
C, // ptrC
cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, L)), // stride C
D, // ptrD
cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, L)), // stride D
},
hw_info
};
size_t workspace_size = DeviceKernel::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
DeviceKernel gemm_op;
cutlass::Status status = gemm_op.run(arguments,
workspace.get(),
nullptr); // CUDA stream
return status;
}
"""
_CUTLASS_KERNEL_RUN_GROUPED_GEMM_2x = """
using ElementCompute = typename DeviceKernel::EpilogueOutputOp::ElementCompute;
int threadblock_count = DeviceKernel::sufficient();
cutlass::Status ${name}_kernel_run(int problem_count, cutlass::gemm::GemmCoord* problem_sizes,
DeviceKernel::ElementA** A, DeviceKernel::ElementB** B, DeviceKernel::ElementC** C, DeviceKernel::ElementC** D,
int64_t* lda, int64_t* ldb, int64_t* ldc, int64_t* ldd,
ElementCompute alpha, ElementCompute beta) {
typename DeviceKernel::Arguments arguments {
problem_sizes,
problem_count,
threadblock_count,
{alpha, beta},
A, B, C, D,
lda, ldb, ldc, ldd
};
size_t workspace_size = DeviceKernel::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
DeviceKernel gemm_op;
cutlass::Status status = gemm_op.initialize(arguments,
workspace.get(),
nullptr); // CUDA stream
if (status != cutlass::Status::kSuccess) {
return status;
}
status = gemm_op();
return status;
}
"""
_CUTLASS_KERNEL_RUN_CONV2D_2x = """
using UnderlyingKernel = typename DeviceKernel::UnderlyingKernel;
namespace {
using TensorRefA = typename UnderlyingKernel::TensorRefA;
using TensorRefB = typename UnderlyingKernel::TensorRefB;
using TensorRefC = typename UnderlyingKernel::TensorRefC;
using ElementCompute = typename UnderlyingKernel::EpilogueOutputOp::ElementCompute;
}
template<typename TensorRef, typename Element>
TensorRef get_tensor_ref(cutlass::Tensor4DCoord tensor_coord, Element* ptr){
cutlass::layout::TensorNHWC layout = cutlass::layout::TensorNHWC::packed(tensor_coord);
TensorRef tensor_ref(ptr, layout);
return tensor_ref;
}
cutlass::Status ${name}_kernel_run(cutlass::conv::Conv2dProblemSize* problem_size,
UnderlyingKernel::ElementA* A, UnderlyingKernel::ElementB* B,
UnderlyingKernel::ElementC* C, UnderlyingKernel::ElementC* D,
ElementCompute alpha, ElementCompute beta, std::string split_k_mode,
cudaStream_t stream, int device_id=0) {
// create the tensor references
cutlass::Tensor4DCoord tensor_coord_A = cutlass::conv::implicit_gemm_tensor_a_extent(
cutlass::conv::Operator::k${conv_kind_name}, *problem_size
);
cutlass::Tensor4DCoord tensor_coord_B = cutlass::conv::implicit_gemm_tensor_b_extent(
cutlass::conv::Operator::k${conv_kind_name}, *problem_size
);
cutlass::Tensor4DCoord tensor_coord_C = cutlass::conv::implicit_gemm_tensor_c_extent(
cutlass::conv::Operator::k${conv_kind_name}, *problem_size
);
TensorRefA tensor_ref_A = get_tensor_ref<TensorRefA, UnderlyingKernel::ElementA>(tensor_coord_A, A);
TensorRefB tensor_ref_B = get_tensor_ref<TensorRefB, UnderlyingKernel::ElementB>(tensor_coord_B, B);
TensorRefC tensor_ref_C = get_tensor_ref<TensorRefC, UnderlyingKernel::ElementC>(tensor_coord_C, C);
TensorRefC tensor_ref_D = get_tensor_ref<TensorRefC, UnderlyingKernel::ElementC>(tensor_coord_C, D);
cutlass::conv::SplitKMode mode;
if (split_k_mode == "serial") {
mode = cutlass::conv::SplitKMode::kSerial;
} else if (split_k_mode == "parallel") {
mode = cutlass::conv::SplitKMode::kParallel;
} else {
throw std::runtime_error("Invalid split_k_mode: " + split_k_mode);
}
typename DeviceKernel::Arguments arguments{
*problem_size,
tensor_ref_A,
tensor_ref_B,
tensor_ref_C,
tensor_ref_D,
{alpha, beta},
mode
};
DeviceKernel implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
void* workspace_ptr = device_memory_allocation(workspace_size, device_id);
cutlass::Status status = implicit_gemm_op.can_implement(arguments);
if (status != cutlass::Status::kSuccess) {
return status;
}
status = implicit_gemm_op.initialize(arguments, workspace_ptr, stream);
if (status != cutlass::Status::kSuccess) {
return status;
}
//
// Launch initialized CUTLASS kernel
//
status = implicit_gemm_op(stream);
return status;
}
"""
| python/cutlass/emit/common.py/0 | {
"file_path": "python/cutlass/emit/common.py",
"repo_id": "python",
"token_count": 4533
} | 43 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Profiler based on the cuda events
"""
import re
import subprocess
from cuda import cuda, cudart
import numpy as np
from cutlass import CUTLASS_PATH
from cutlass.backend.library import DataTypeSize
from cutlass.op.op import OperationBase
from cutlass.shape import GemmCoord
from cutlass.utils.datatypes import is_numpy_tensor
class GpuTimer:
def __init__(self) -> None:
self.events = [
cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT)[1],
cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT)[1],
]
def start(self, stream=cuda.CUstream(0)):
(err,) = cuda.cuEventRecord(self.events[0], stream)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error {str(err)}")
def stop(self, stream=cuda.CUstream(0)):
(err,) = cuda.cuEventRecord(self.events[1], stream)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error {str(err)}")
pass
def stop_and_wait(self, stream=cuda.CUstream(0)):
self.stop(stream)
if stream:
(err,) = cuda.cuStreamSynchronize(stream)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error {str(err)}")
else:
(err,) = cudart.cudaDeviceSynchronize()
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error {str(err)}")
def duration(self, iterations=1):
err, duration = cuda.cuEventElapsedTime(self.events[0], self.events[1])
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(f"CUDA Error {str(err)}")
return duration / float(iterations)
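# Usage sketch for GpuTimer (assumes an initialized CUDA context and kernels launched
# on the default stream):
#
#   timer = GpuTimer()
#   timer.start()
#   # ... launch kernels ...
#   timer.stop_and_wait()
#   elapsed_ms = timer.duration(iterations=1)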
class CUDAEventProfiler:
def __init__(self, op: OperationBase, warmup_iterations: int=500, iterations: int=500, *args, **kwargs) -> None:
self.arguments = op.run(*args, **kwargs)
self.operation = op.operation
self.warmup_iterations = warmup_iterations
self.iterations = iterations
self.timer = GpuTimer()
#
# Cutlass Python Interface Profiler
#
def __call__(self):
for _ in range(self.warmup_iterations):
self.operation.run(self.arguments)
self.timer.start()
for _ in range(self.iterations):
self.operation.run(self.arguments)
self.timer.stop_and_wait()
runtime = self.timer.duration(self.iterations)
return runtime
#
# CUTLASS Profiler
#
def run_cutlass_profiler(self):
alpha = 1.0
beta = 1.0
profiler_path = CUTLASS_PATH + "/build/tools/profiler/cutlass_profiler"
kernel_name = self.operation.procedural_name()
verification_providers = "device"
provider = "cutlass"
problem_size = self.arguments.problem_size
if "cutlass3x" in kernel_name:
            # the cutlass3x generator only has column-major output
layout_name = self.operation.layout_name_3x()
if layout_name[-1] == "t":
new_layout_name = "".join(["n" for l in layout_name if l == "t" or "t"])
problem_size = GemmCoord(problem_size.n, problem_size.m, problem_size.k)
kernel_name = kernel_name.replace(layout_name, new_layout_name)
batch_count = self.arguments.batch_count
cmd = f"{profiler_path} --kernels={kernel_name} --verification-providers={verification_providers} " \
f"--providers={provider} --m={problem_size.m()} --n={problem_size.n()} --k={problem_size.k()} " \
f"--batch_count={batch_count} --alpha={alpha} --beta={beta} "\
f"--warmup-iterations={self.warmup_iterations} --profiling-iterations={self.iterations}"
result = subprocess.getoutput(cmd)
m = re.search(r"Runtime:\s+(?P<runtime>\d+.\d+)", result)
runtime = float(m.group("runtime"))
m = re.search(r"Bytes:\s+(?P<bytes>\d+)", result)
bytes = int(m.group("bytes"))
m = re.search(r"FLOPs:\s+(?P<flops>\d+)", result)
flops = int(m.group("flops"))
# check if the problem size matches
assert bytes == self.bytes(problem_size, batch_count, beta)
assert flops == self.flops(problem_size, batch_count, beta)
return runtime
def bytes(self, problem_size, batch_count=1, beta=0.0):
m = problem_size.m()
n = problem_size.n()
k = problem_size.k()
bytes = (
(DataTypeSize[self.operation.A.element] * m // 8) * k
+ (DataTypeSize[self.operation.B.element] * n // 8) * k
+ (DataTypeSize[self.operation.C.element] * m // 8) * n
)
if beta != 0:
bytes += (DataTypeSize[self.operation.C.element] * m // 8) * n
bytes *= batch_count
return bytes
def flops(self, problem_size, batch_count=1, beta=0.0):
m = problem_size.m()
n = problem_size.n()
k = problem_size.k()
flops_ = (m * n * k) * 2 * batch_count
if beta != 0:
flops_ += m * n * batch_count * 2
return flops_
| python/cutlass/utils/profiler.py/0 | {
"file_path": "python/cutlass/utils/profiler.py",
"repo_id": "python",
"token_count": 2791
} | 44 |
<jupyter_start><jupyter_text>Example of using elementwise activation functions in the CUTLASS Python interfaceThis notebook walks through a basic example of using the CUTLASS Python interface to declare, compile, and run GEMMs with different epilogues.[](https://colab.research.google.com/github/NVIDIA/cutlass/tree/master/examples/00_basic_gemm.ipynb) We first import various packages needed for the example and construct the input and output tensors that will be used in our example.<jupyter_code>import numpy as np
import cutlass
# This controls whether the C++ GEMM declaration will be printed at each step. Set to `False` to
# omit this information.
print_module = True
m = 256
n = m
k = m
type_A = np.float16
type_B = np.float16
type_C = np.float16
type_D = np.float16
np.random.seed(1234)
scope_min = -4
scope_max = 4
tensor_A = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, k)).astype(type_A))
tensor_B = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(k, n)).astype(type_B))
tensor_C = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, n)).astype(type_C))
alpha = np.float16(1.)
beta = np.float16(0.)
tensor_D = np.zeros(tensor_C.shape).astype(type_D)<jupyter_output>/usr/local/lib/python3.8/dist-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
from .autonotebook import tqdm as notebook_tqdm<jupyter_text>Run a GEMM with an identity activation functionTo begin, we simply run a default GEMM with an identity activation function. This performs the well-known operation `D = alpha * (A @ B) + beta * C`. This is the default activation function used, and does not need to be specified.<jupyter_code>plan = cutlass.op.Gemm(element=np.float16, layout=cutlass.LayoutType.RowMajor)
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output>// Gemm operator cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8
using cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor,
cutlass::half_t,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<256, 128, 64>,
cutlass::gemm::GemmShape<64, 64, 64>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, cutlass::half_t, cutlass::half_t>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
3,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
// Define named type
struct cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8_type :
public cutlass_sm80_tensorop_[...]<jupyter_text>Run a GEMM with a ReLU element-wise activation functionCUTLASS makes it easy to support other element-wise activation functions. This results in performing an element-wise after the generic linear combination performed in a GEMM. If we call such an activation function `act`, the resulting formulation is:```D = alpha * (A @ B) + beta * CD = act(D)```Here, we will add a ReLU activation function. Given an input `x`, ReLU returns `max(x, 0)`.This is easy to do in CUTLASS. One only needs to set the plan's `activation` field.<jupyter_code>tensor_D_relu = np.zeros(tensor_C.shape).astype(type_D)
plan.activation = cutlass.epilogue.relu
plan.run(tensor_A, tensor_B, tensor_C, tensor_D_relu, print_module=print_module)<jupyter_output>// Gemm operator cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8
using cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor,
cutlass::half_t,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<256, 128, 64>,
cutlass::gemm::GemmShape<64, 64, 64>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombinationGeneric<cutlass::epilogue::thread::ReLu, cutlass::half_t, 8, cutlass::half_t, cutlass::half_t>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
3,
cutlass::arch::OpMultiplyAdd
>::GemmKernel;
// Define named type
struct cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8[...]<jupyter_text>We can now verify the result of the GEMM that used a ReLU activation function:<jupyter_code>relu_ref = (tensor_D >= 0).astype(type_D) * tensor_D
np.testing.assert_array_equal(relu_ref, tensor_D_relu)<jupyter_output><empty_output><jupyter_text>Other element-wise activation functionsCUTLASS supports a variety of widely-used element-wise activation functions. We can obtain a list of these functions via the `activations()` method.<jupyter_code>activations = plan.activations()
for activation in activations:
print(activation)<jupyter_output><class 'cutlass.backend.epilogue.gelu'>
<class 'cutlass.backend.epilogue.hardswish'>
<class 'cutlass.backend.epilogue.identity'>
<class 'cutlass.backend.epilogue.leaky_relu'>
<class 'cutlass.backend.epilogue.relu'>
<class 'cutlass.backend.epilogue.sigmoid'>
<class 'cutlass.backend.epilogue.silu'>
<class 'cutlass.backend.epilogue.tanh'><jupyter_text>We can then run each of them:<jupyter_code>for activation in activations:
print('=============================================================================================')
print(f'Compiling and running activation {activation}')
print('=============================================================================================')
plan.activation = activation
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output>=============================================================================================
Compiling and running activation <class 'cutlass.backend.epilogue.gelu'>
=============================================================================================
// Gemm operator cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8
using cutlass_sm80_tensorop_h16x8x16gemm_1x1x1_256x128_64x3_tt_align8_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, 8,
cutlass::half_t, cutlass::layout::RowMajor,
cutlass::half_t,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<256, 128, 64>,
cutlass::gemm::GemmShape<64, 64, 64>,
cutlass::gemm::GemmShape<16, 8, 16>,
cutlass::epilogue::thread::LinearCombinationGeneric<cutlass::epilogue::thread::GELU, cutlass:[...] | python/docs/externals/01_epilogue.ipynb/0 | {
"file_path": "python/docs/externals/01_epilogue.ipynb",
"repo_id": "python",
"token_count": 2598
} | 45 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Definition of CuTe Layouts and functions to manipulate them
"""
from itertools import chain
from typing import Union
from .int_tuple import *
class LayoutBase:
pass
def is_layout(x):
return isinstance(x, LayoutBase)
class Layout(LayoutBase):
def __init__(self, _shape, _stride=None):
self.shape = _shape
if _stride is None:
self.stride = prefix_product(self.shape)
else:
self.stride = _stride
# operator ==
def __eq__(self, other):
return self.shape == other.shape and self.stride == other.stride
# operator len(L) (len [rank] like tuples)
def __len__(self):
if is_tuple(self.shape):
return len(self.shape)
else:
return 1
# operator () (map coord to idx)
def __call__(self, *args):
"""
Map a logical coordinate to a linear index (Coord has no Underscore slice operators)
OR
Slice the layout and return the sublayout (Coord has an Underscore slice op)
Follow the same behavior of `Layout::operator(Coord const&)` in cute C++
"""
if has_none(args):
if len(args) == 1:
return Layout(slice_(args[0], self.shape), slice_(args[0], self.stride))
else:
return Layout(slice_(args, self.shape), slice_(args, self.stride))
else:
if len(args) == 1:
return crd2idx(args[0], self.shape, self.stride)
else:
return crd2idx(args, self.shape, self.stride)
# operator [] (get-i like tuples)
def __getitem__(self, i):
if is_tuple(self.shape):
return Layout(self.shape[i], self.stride[i])
else:
assert i == 0
return Layout(self.shape, self.stride)
# size(layout) Size of the domain
def size(self):
return product(self.shape)
# cosize(layout) Size of the codomain
def cosize(self):
return self(self.size() - 1) + 1
# print and str
def __str__(self):
return f"{self.shape}:{self.stride}"
# error msgs and representation
def __repr__(self):
return f"Layout({self.shape},{self.stride})"
# Make Layout from a list of layouts (each layout it's own mode in the result)
def make_layout(*layouts):
if len(layouts) == 1 and not is_layout(layouts[0]):
layouts = layouts[0]
shape, stride = zip(*((a.shape,a.stride) for a in layouts))
return Layout(shape, stride)
# Size of the domain
def size(layout):
if is_layout(layout):
return layout.size()
return product(layout)
# Size of the codomain
def cosize(layout):
return layout.cosize()
# Layout coalesce -- flatten and combine as many modes as possible while preserving the int-to-int function
def coalesce(layout, profile=None):
if is_tuple(profile):
assert len(layout) >= len(profile)
return make_layout(chain((coalesce(layout[i], profile[i]) for i in range( 0,len(profile))),
(layout[i] for i in range(len(profile),len(layout)))))
result_shape = [1]
result_stride = [0]
for (shape,stride) in zip(flatten(layout.shape),flatten(layout.stride)):
# skip their shape-1s
if shape == 1:
continue
# replace our shape-1 with anything
elif result_shape[-1] == 1:
result_shape[-1] = shape
result_stride[-1] = stride
# merge modes if the shape*stride match
elif result_shape[-1] * result_stride[-1] == stride:
result_shape[-1] = result_shape[-1] * shape
# append a new mode
else:
result_shape.append(shape)
result_stride.append(stride)
if len(result_shape) == 1:
return Layout(result_shape[0], result_stride[0])
else:
return Layout(tuple(result_shape), tuple(result_stride))
# Layout filter -- replace all stride-0 modes with size-1 and then coalesce to remove them
def filter(layout, profile=None):
if is_tuple(profile):
assert len(layout) >= len(profile)
return make_layout(chain((filter(layout[i], profile[i]) for i in range( 0,len(profile))),
(layout[i] for i in range(len(profile),len(layout)))))
result_shape = []
result_stride = []
for (shape,stride) in zip(flatten(layout.shape),flatten(layout.stride)):
# skip their shape-1s and stride-0s
if not (shape == 1 or stride == 0):
result_shape.append(shape)
result_stride.append(stride)
if len(result_shape) == 0:
return Layout(1,0)
else:
return coalesce(Layout(tuple(result_shape), tuple(result_stride)))
# Layout composition
# Use tuples-of-layouts to perform this operation by-mode and None as no-op
def composition(layoutA, layoutB):
if layoutB is None:
return layoutA
elif is_int(layoutB):
return composition(layoutA, Layout(layoutB))
elif is_tuple(layoutB):
assert len(layoutA) >= len(layoutB)
return make_layout(chain((composition(layoutA[i], layoutB[i]) for i in range( 0,len(layoutB))),
(layoutA[i] for i in range(len(layoutB),len(layoutA)))))
elif is_tuple(layoutB.shape):
return make_layout(composition(layoutA, layoutB_i) for layoutB_i in layoutB)
if layoutB.stride == 0:
return Layout(layoutB.shape, 0)
else:
result_shape = []
result_stride = []
rest_shape = layoutB.shape
rest_stride = layoutB.stride
for (s, d) in zip(flatten(layoutA.shape)[:-1], flatten(layoutA.stride)[:-1]):
s1 = shape_div(s, rest_stride)
result_shape.append(min(s1,rest_shape))
result_stride.append(rest_stride * d)
rest_shape = shape_div(rest_shape, abs(s1))
rest_stride = shape_div(rest_stride, s)
result_shape.append(rest_shape)
result_stride.append(rest_stride * flatten(layoutA.stride)[-1])
return coalesce(Layout(tuple(result_shape), tuple(result_stride)))
# Layout complement
def complement(layout, max_idx=1):
if is_int(layout):
return complement(Layout(layout))
result_shape = []
result_stride = []
current_idx = 1
sorted_DS = sorted(zip(flatten(layout.stride), flatten(layout.shape)))
for (stride, shape) in sorted_DS:
if stride == 0 or shape == 1:
continue
in_bound = current_idx <= shape * stride
# To support symbolic value which can't be evaluated now
assert (type(in_bound) is not bool) or in_bound
result_shape.append(stride // current_idx)
result_stride.append(current_idx)
current_idx = shape * stride
result_shape.append((max_idx + current_idx - 1) // current_idx) # ceil_div
result_stride.append(current_idx)
return coalesce(Layout(tuple(result_shape), tuple(result_stride)))
# Layout right inverse
def right_inverse(layout):
if layout is None:
return None
elif is_int(layout):
return Layout(layout)
result_shape = []
result_stride = []
current_idx = 1
flat_shape = flatten(layout.shape)
flat_stride = flatten(layout.stride)
sorted_DSA = sorted(zip(flat_stride, flat_shape, prefix_product(flat_shape)))
for (stride,shape,rstride) in sorted_DSA:
if shape == 1:
continue
if current_idx != stride:
break
result_shape.append(shape)
result_stride.append(rstride)
current_idx = shape * stride
return coalesce(Layout(tuple(result_shape), tuple(result_stride)))
# Layout left inverse
def left_inverse(layout):
if layout is None:
return None
elif is_int(layout):
return Layout(layout)
return right_inverse(make_layout(layout, complement(layout)))
# Split a layout by the composition of B and the "rest"
# Use tuples-of-layouts to perform this operation by-mode and None as no-op
def logical_divide(layoutA, layoutB):
if layoutB is None:
return layoutA
elif is_int(layoutB):
return logical_divide(layoutA, Layout(layoutB))
elif is_tuple(layoutB):
assert len(layoutA) >= len(layoutB)
return make_layout(chain((logical_divide(layoutA[i], layoutB[i]) for i in range( 0,len(layoutB))),
(layoutA[i] for i in range(len(layoutB),len(layoutA)))))
return composition(layoutA, make_layout(layoutB, complement(layoutB, size(layoutA))))
# Reproduce a layoutA over a layoutB
# Use tuples-of-layouts to perform this operation by-mode and None as no-op
def logical_product(layoutA, layoutB):
if layoutB is None:
return layoutA
elif is_int(layoutB):
        return logical_product(layoutA, Layout(layoutB))
elif is_tuple(layoutB):
assert len(layoutA) >= len(layoutB)
return make_layout(chain((logical_product(layoutA[i], layoutB[i]) for i in range( 0,len(layoutB))),
(layoutA[i] for i in range(len(layoutB),len(layoutA)))))
    return make_layout(layoutA, composition(complement(layoutA, size(layoutA)*cosize(layoutB)), layoutB))
# Gather the modes from a hierarchical logical_divide or logical_product
def hier_unzip(splitter, layoutA, layoutB):
if layoutB is None:
return make_layout(Layout(1,0), layoutA)
elif is_tuple(layoutB):
assert len(layoutA) >= len(layoutB)
# A layout with shape ((A,a),(B,b),(C,c))
split = make_layout(hier_unzip(splitter, layoutA[i], layoutB[i]) for i in range(0,len(layoutB)))
# Gather to shape ((A,B,C,...),(a,b,c,...,y,z))
return make_layout(make_layout( split[i][0] for i in range( 0,len(layoutB))),
make_layout(chain((split[i][1] for i in range( 0,len(layoutB))),
(layoutA[i] for i in range(len(layoutB),len(layoutA))))))
# splitter must return a rank-2 layout
return splitter(layoutA, layoutB)
# Apply logical divide hierarchically and gather the split modes into two modes
def zipped_divide(layoutA, layoutB):
return hier_unzip(logical_divide, layoutA, layoutB)
# Perform logical divide hierarchically and gather tiles (B-layouts) into a new mode
def tiled_divide(layoutA, layoutB):
result = zipped_divide(layoutA, layoutB)
return make_layout([result[0]] + [result[1][i] for i in range(len(result[1]))])
# Apply logical product hierarchically and gather the split modes into two modes
def zipped_product(layoutA, layoutB):
return hier_unzip(logical_product, layoutA, layoutB)
# Perform logical product hierarchically and gather tiles (B-layouts) into a new mode
def tiled_product(layoutA, layoutB):
result = zipped_product(layoutA, layoutB)
return make_layout([result[0]] + [result[1][i] for i in range(len(result[1]))])
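# Example (illustrative note): with a tuple-of-layouts tiler, zipped_divide
# tiles each mode and gathers the result into ((tile), (rest)). For
# layoutA = Layout((8, 6), (1, 8)) and layoutB = (Layout(2, 1), Layout(3, 1)),
# zipped_divide yields shape ((2, 3), (4, 2)) with stride ((1, 8), (2, 24)):
# the first mode addresses one 2x3 tile, the second enumerates the 4x2 grid of
# tiles. tiled_divide flattens the rest-mode, giving shape ((2, 3), 4, 2).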
def slice_and_offset(crd: tuple,
layout: Layout):
return (Layout(slice_(crd, layout.shape), slice_(crd, layout.stride)),
crd2idx(crd, layout.shape, layout.stride))
| python/pycute/layout.py/0 | {
"file_path": "python/pycute/layout.py",
"repo_id": "python",
"token_count": 4643
} | 46 |
################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Unittest for mixed types of nodes in SM90
"""
import logging
import unittest
import cutlass
from cutlass.backend import *
from cutlass.epilogue import *
from cutlass.swizzle import ThreadblockSwizzleStreamK
from utils.evt_testbed import EVTTestBed, EVTTestCaseBase
cutlass.set_log_level(logging.WARNING)
@unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]")
class TestEVTMixed(EVTTestCaseBase):
def test_mixed_dag(self):
def evt_mixed_dag(accum, alpha, C, beta, aux, cbias, rbias):
F = alpha * accum + (beta * C + aux)
F_row_max = max(F, dim=[0, 1])
E = relu(F + 1) + cbias + rbias
E_col_max = max(E, dim=[0, 2])
D = E + F
return D, F, F_row_max, E_col_max
if device_cc() == 80:
alignments = [2, 4, 8]
else:
# Sm90 EVT currently only supports 128-bit alignment
alignments = [8,]
for align in alignments:
for m, n, k, l in self.get_problem_sizes(align):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 1.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"beta": 1.0,
"aux": self.fake_tensor(self.element, (l, m, n)),
"cbias": self.fake_tensor(self.element, (m, 1)),
"rbias": self.fake_tensor(self.element, (n,)),
"D": self.fake_tensor(self.element, (l, m, n)),
"F": self.fake_tensor(self.element, (l, m, n)),
"F_row_max": self.fake_tensor(DataType.f32, (n,)),
"E_col_max": self.fake_tensor(DataType.f32, (m, 1))
}
launcher = EVTTestBed(self.element, evt_mixed_dag, example_inputs)
input_keys = ["alpha", "C", "beta", "aux", "cbias", "rbias"]
result_keys = ["D", "F", "F_row_max", "E_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
@unittest.skipIf(device_cc() not in [80, 89], "This unittest is for cc 80 and 89 only")
def test_mixed_dag_float(self):
def evt_mixed_dag(accum, alpha, C, beta, aux, cbias, rbias):
F = alpha * accum + (beta * C + aux)
F_row_max = max(F, dim=[0, 1])
E = relu(F + 1) + cbias + rbias
E_col_max = max(E, dim=[0, 2])
D = E + F
return D, F, F_row_max, E_col_max
for align in [3, 2, 4]:
for m, n, k, l in self.get_problem_sizes(align):
example_inputs = {
"accum": self.fake_tensor(np.float32, (l, m, n)),
"alpha": 1.0,
"C": self.fake_tensor(np.float32, (l, m, n)),
"beta": 1.0,
"aux": self.fake_tensor(np.float32, (l, m, n)),
"cbias": self.fake_tensor(np.float32, (m, 1)),
"rbias": self.fake_tensor(np.float32, (n,)),
"D": self.fake_tensor(np.float32, (l, m, n)),
"F": self.fake_tensor(np.float32, (l, m, n)),
"F_row_max": self.fake_tensor(np.float32, (n,)),
"E_col_max": self.fake_tensor(np.float32, (m, 1))
}
launcher = EVTTestBed(DataType.f32, evt_mixed_dag, example_inputs)
input_keys = ["alpha", "C", "beta", "aux", "cbias", "rbias"]
result_keys = ["D", "F", "F_row_max", "E_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
@unittest.skipIf(device_cc() not in [80, 89], "This unittest is for cc 80 and 89 only")
def test_mixed_dag_stage2(self):
def evt_mixed_dag(accum, alpha, C, beta, aux, cbias, rbias):
F = alpha * accum + (beta * C + aux)
F_row_max = max(F, dim=[0, 1])
E = relu(F + 1) + cbias + rbias
E_col_max = max(E, dim=[0, 2])
D = E + F
return D, F, F_row_max, E_col_max
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 1.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"beta": 1.0,
"aux": self.fake_tensor(self.element, (l, m, n)),
"cbias": self.fake_tensor(self.element, (m, 1)),
"rbias": self.fake_tensor(self.element, (n,)),
"D": self.fake_tensor(self.element, (l, m, n)),
"F": self.fake_tensor(self.element, (l, m, n)),
"F_row_max": self.fake_tensor(DataType.f32, (n,)),
"E_col_max": self.fake_tensor(DataType.f32, (m, 1))
}
launcher = EVTTestBed(self.element, evt_mixed_dag, example_inputs, epilogue_stages=2)
input_keys = ["alpha", "C", "beta", "aux", "cbias", "rbias"]
result_keys = ["D", "F", "F_row_max", "E_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
@unittest.skipIf(device_cc() not in [80, 89], "This unittest is for cc 80 and 89 only")
def test_mixed_dag_partition_k(self):
def evt_mixed_dag(accum, alpha, C, beta, aux, cbias, rbias):
F = alpha * accum + (beta * C + aux)
F_row_max = max(F, dim=[0, 1])
E = relu(F + 1) + cbias + rbias
E_col_max = max(E, dim=[0, 2])
D = E + F
return D, F, F_row_max, E_col_max
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 1.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"beta": 1.0,
"aux": self.fake_tensor(self.element, (l, m, n)),
"cbias": self.fake_tensor(self.element, (m, 1)),
"rbias": self.fake_tensor(self.element, (n,)),
"D": self.fake_tensor(self.element, (l, m, n)),
"F": self.fake_tensor(self.element, (l, m, n)),
"F_row_max": self.fake_tensor(DataType.f32, (n,)),
"E_col_max": self.fake_tensor(DataType.f32, (m, 1))
}
tile_description = {
"threadblock_shape": [128, 128, 64],
"warp_count": [2, 2, 2]
}
launcher = EVTTestBed(self.element, evt_mixed_dag, example_inputs, tile_description=tile_description, epilogue_stages=2)
input_keys = ["alpha", "C", "beta", "aux", "cbias", "rbias"]
result_keys = ["D", "F", "F_row_max", "E_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
@unittest.skipIf(device_cc() not in [80, 89], "This unittest is for cc 80 and 89 only")
def test_mixed_dag_stream_k(self):
def evt_mixed_dag(accum, alpha, C, beta, aux, cbias, rbias):
F = alpha * accum + (beta * C + aux)
F_row_max = max(F, dim=[0, 1])
E = relu(F + 1) + cbias + rbias
E_col_max = max(E, dim=[0, 2])
D = E + F
return D, F, F_row_max, E_col_max
# High per-sm occupancy tile_description
tile_description = {
"threadblock_shape": [128, 128, 32],
"warp_count": [2, 2, 1],
"stages": 3
}
tds = [None, tile_description]
for td in tds:
for m, n, k, l in self.get_problem_sizes(8, k=960, batch_count=[1, 3]):
if l == 1:
example_inputs = {
"accum": self.fake_tensor(self.element, (m, n)),
"alpha": 1.0,
"C": self.fake_tensor(self.element, (m, n)),
"beta": 1.0,
"aux": self.fake_tensor(self.element, (m, n)),
"cbias": self.fake_tensor(self.element, (m, 1)),
"rbias": self.fake_tensor(self.element, (n,)),
"D": self.fake_tensor(self.element, (m, n)),
"F": self.fake_tensor(self.element, (m, n)),
"F_row_max": self.fake_tensor(DataType.f32, (n,)),
"E_col_max": self.fake_tensor(DataType.f32, (m, 1))
}
else:
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 1.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"beta": 1.0,
"aux": self.fake_tensor(self.element, (l, m, n)),
"cbias": self.fake_tensor(self.element, (m, 1)),
"rbias": self.fake_tensor(self.element, (n,)),
"D": self.fake_tensor(self.element, (l, m, n)),
"F": self.fake_tensor(self.element, (l, m, n)),
"F_row_max": self.fake_tensor(DataType.f32, (n,)),
"E_col_max": self.fake_tensor(DataType.f32, (m, 1))
}
if td is not None:
launcher = EVTTestBed(
self.element, evt_mixed_dag, example_inputs,
tile_description=td,
swizzling_functor=ThreadblockSwizzleStreamK, backend="torch")
else:
launcher = EVTTestBed(
self.element, evt_mixed_dag, example_inputs,
swizzling_functor=ThreadblockSwizzleStreamK, backend="torch")
input_keys = ["alpha", "C", "beta", "aux", "cbias", "rbias"]
result_keys = ["D", "F", "F_row_max", "E_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_mixed_dag_no_batch(self):
def evt_mixed_dag_no_batch(accum, alpha, C, beta, aux, cbias, rbias):
F = alpha * accum + (beta * C + aux)
F_row_max = max(F, dim=[0, 1])
E = relu(F + 1) + cbias + rbias
E_col_max = max(E, dim=[0, 2])
D = E + F
return D, F, F_row_max, E_col_max
for m, n, k, _ in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (m, n)),
"alpha": 1.0,
"C": self.fake_tensor(self.element, (m, n)),
"beta": 1.0,
"aux": self.fake_tensor(self.element, (m, n)),
"cbias": self.fake_tensor(self.element, (m, 1)),
"rbias": self.fake_tensor(self.element, (n,)),
"D": self.fake_tensor(self.element, (m, n)),
"F": self.fake_tensor(self.element, (m, n)),
"F_row_max": self.fake_tensor(DataType.f32, (n,)),
"E_col_max": self.fake_tensor(DataType.f32, (m, 1))
}
launcher = EVTTestBed(self.element, evt_mixed_dag_no_batch, example_inputs)
input_keys = ["alpha", "C", "beta", "aux", "cbias", "rbias"]
result_keys = ["D", "F", "F_row_max", "E_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, 1)
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/evt/evt_mixed_sm80_90.py/0 | {
"file_path": "test/python/cutlass/evt/evt_mixed_sm80_90.py",
"repo_id": "test",
"token_count": 7012
} | 47 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from cutlass_library import SubstituteTemplate
import cutlass
from cutlass_library import (
DataTypeNames,
EpilogueScheduleSuffixes,
KernelScheduleSuffixes,
LayoutType,
OpcodeClassNames,
ShortDataTypeNames,
ShortLayoutTypeNames
)
from cutlass.backend import library
from gemm_testbed import test_all_gemm
class Layout:
"""
Utility class to map transpose and non-transpose terminology to row- and column-major terminology
"""
T = LayoutType.RowMajor
N = LayoutType.ColumnMajor
class LayoutCombination:
"""
Utility class defining all combinations of row- and column-major layouts for operands to a GEMM
"""
NNN = (Layout.N, Layout.N, Layout.N)
NNT = (Layout.N, Layout.N, Layout.T)
NTN = (Layout.N, Layout.T, Layout.N)
NTT = (Layout.N, Layout.T, Layout.T)
TNN = (Layout.T, Layout.N, Layout.N)
TNT = (Layout.T, Layout.N, Layout.T)
TTN = (Layout.T, Layout.T, Layout.N)
TTT = (Layout.T, Layout.T, Layout.T)
def get_name(
layouts,
alignments,
element_output,
element_accumulator,
element_epilogue,
cluster_shape,
threadblock_shape,
stages,
element_a,
element_b,
element_c,
arch,
opclass,
kernel_schedule=None,
epilogue_schedule=None,
suffix="",
):
"""
Generates a procedural name for a test case.
:param layouts: indexable container of layouts of A, B, and C operands
:param alignments: indexable container of alignments of A, B, and C operands
:param element_output: data type of the output element
:param element_accumulator: data type used in accumulation
:param element_epilogue: data type used in computing the epilogue
:param cluster_shape: indexable container of dimensions of threadblock cluster to be launched
:param threadblock_shape: indexable container of dimensions of threadblock tiles
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param element_a: data type of operand A
:param element_b: data type of operand B
:param element_c: data type of operand C
:param arch: compute capability of kernel being generated
:type arch: int
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass.OpcodeClass
:param kernel_schedule: kernel_schedule type
:type kernel_schedule: cutlass.KernelScheduleType
:param epilogue_schedule: epilogue_schedule type
:type epilogue_schedule: cutlass.EpilogueScheduleType
:param suffix: additional string to add to the suffix of the name
:type suffix: str
:return: procedurally generated name for the test case
:rtype: str
"""
name_format = "test_SM${arch}_Device_Gemm_${eA}${lA}_${eB}${lB}_${eC}${lC}_${opclass}_${acc}_${tbM}x${tbN}x${tbK}_${cM}x${cN}x${cK}_${stages}_align${aA}-${aB}-${aC}${k}${e}${suffix}"
return SubstituteTemplate(
name_format,
{
"arch": str(arch),
"eA": DataTypeNames[element_a],
"eB": DataTypeNames[element_b],
"eC": DataTypeNames[element_c],
"lA": ShortLayoutTypeNames[layouts[0]],
"lB": ShortLayoutTypeNames[layouts[1]],
"lC": ShortLayoutTypeNames[layouts[2]],
"opclass": OpcodeClassNames[opclass],
"acc": DataTypeNames[element_accumulator],
"cM": str(cluster_shape[0]),
"cN": str(cluster_shape[1]),
"cK": str(cluster_shape[2]),
"tbM": str(threadblock_shape[0]),
"tbN": str(threadblock_shape[1]),
"tbK": str(threadblock_shape[2]),
"stages": str(stages) if stages is not None else "auto",
"aA": str(alignments[0]),
"aB": str(alignments[1]),
"aC": str(alignments[2]),
"k": "" if kernel_schedule is None else KernelScheduleSuffixes[kernel_schedule],
"e": "" if epilogue_schedule is None else EpilogueScheduleSuffixes[epilogue_schedule],
"suffix": "" if suffix is None else suffix,
},
)
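# Example (illustrative; the exact tokens come from cutlass_library's
# DataTypeNames, ShortLayoutTypeNames, and OpcodeClassNames tables): calling
# get_name with layouts=LayoutCombination.TNT, alignments=[8, 8, 8], f16
# operands and output, f32 accumulation, cluster_shape=[1, 1, 1],
# threadblock_shape=[128, 128, 64], stages=3, arch=80, and a tensor-op opclass
# produces a name along the lines of
# "test_SM80_Device_Gemm_f16t_f16n_f16t_tensorop_f32_128x128x64_1x1x1_3_align8-8-8".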
def add_test_gemm(
cls=None,
cc=None,
element=None,
layouts=None,
alignments=None,
element_output=None,
element_accumulator=None,
cluster_shape=None,
threadblock_shape=None,
warp_count=None,
stages=None,
opclass=None,
swizzle=None,
kernel_schedule=None,
epilogue_schedule=None,
compilation_modes=['nvcc', 'nvrtc'],
element_A=None,
element_B=None,
element_C=None):
"""
Create test-running functions with the given specification and set them as methods of ``cls``.
:param cls: class to which the generated method will be added
:type cls: type
:param cc: compute capability to compile for
:type cc: int
:param element: data type of A and B operands
:type element: cutlass.DataType
:param layouts: layouts of A, B, and C operands
:type layouts: list or tuple
:param alignments: alignments of A, B, and C operands
:type alignments: list or tuple
:param element_output: data type of the output element
:type element_output: cutlass.DataType
:param element_accumulator: data type used in accumulation
:type element_accumulator: cutlass.DataType
:param cluster_shape: dimensions of clusters
:type cluster_shape: list or tuple
:param threadblock_shape: dimensions of threadblock tiles
:type threadblock_shape: list or tuple
:param warp_count: warps to be launched per threadblock dimension
:type warp_count: list or tuple
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass.OpcodeClass
:param swizzle: threadblock swizzling functor
:param kernel_schedule: kernel schedule to use
:type kernel_schedule: cutlass.KernelScheduleType
:param epilogue_schedule: epilogue schedule to use
:type epilogue_schedule: cutlass.EpilogueScheduleType
:param compilation_modes: list of compilers to be used in testing the kernel (options: 'nvrtc', 'nvcc')
:type compilation_modes: list
:param element_A: data type of operand A. If set, overrides ``element``
:type element_A: cutlass.DataType
:param element_B: data type of operand B. If set, overrides ``element``
:type element_B: cutlass.DataType
:param element_C: data type of operand C. If set, overrides ``element``
:type element_C: cutlass.DataType
"""
if element_A is None:
element_A = element
if element_B is None:
element_B = element
if element_C is None:
element_C = element
if element_output is None:
element_output = element
if element_accumulator is None:
element_accumulator = element
for compilation_mode in compilation_modes:
def run(self):
"""
Dynamically-generated function that constructs a GEMM operation and verifies it against
multiple test cases.
"""
layout_A, layout_B, layout_C = layouts
alignment_A, alignment_B, alignment_C = alignments
plan = cutlass.op.Gemm(element_A=element_A, element_B=element_B,
element_C=element_C, element_D=element_output,
layout_A=layout_A, layout_B=layout_B, layout_C=layout_C,
element_accumulator=element_accumulator,
kernel_cc=cc)
plan.opclass = opclass
if swizzle is not None:
plan.swizzling_functor = swizzle
td = plan.tile_descriptions()[0]
if warp_count is not None:
td.warp_count = warp_count
td.threadblock_shape = threadblock_shape
td.stages = stages
td.cluster_shape = cluster_shape
op = plan.construct(tile_description=td, alignment_A=alignment_A, alignment_B=alignment_B, alignment_C=alignment_C)
self.assertTrue(test_all_gemm(op, 'universal', compilation_mode=compilation_mode))
element_epilogue = element_accumulator
name = get_name(
layouts=layouts, alignments=alignments, element_output=element_output, element_accumulator=element_accumulator,
element_epilogue=element_epilogue, cluster_shape=cluster_shape, threadblock_shape=threadblock_shape,
stages=stages, element_a=element_A, element_b=element_B, element_c=element_C, arch=cc, opclass=opclass,
kernel_schedule=kernel_schedule, epilogue_schedule=epilogue_schedule, suffix=f'_{compilation_mode}')
setattr(cls, name, run)
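# Example usage (illustrative sketch; the class and parameter values below are
# hypothetical rather than taken from an existing test file):
#
# class GemmF16Sm80(unittest.TestCase):
#     pass
#
# add_test_gemm(
#     cls=GemmF16Sm80, cc=80, element=cutlass.DataType.f16,
#     layouts=LayoutCombination.TNT, alignments=[8, 8, 8],
#     element_output=cutlass.DataType.f16,
#     element_accumulator=cutlass.DataType.f32,
#     cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 64],
#     warp_count=[2, 2, 1], stages=3,
#     opclass=cutlass.OpcodeClass.TensorOp)
#
# This attaches one generated test method per entry in compilation_modes to
# GemmF16Sm80; each constructs the kernel and asserts test_all_gemm passes.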
| test/python/cutlass/gemm/utils.py/0 | {
"file_path": "test/python/cutlass/gemm/utils.py",
"repo_id": "test",
"token_count": 4014
} | 48 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "../common/cutlass_unit_test.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorView, rank2_contiguous_dynamic) {
int const M = 8;
int const N = 16;
typedef cutlass::TensorView<int, cutlass::layout::ContiguousMatrix> ContiguousTensorView;
cutlass::layout::Matrix layouts[] = {
cutlass::layout::Matrix::kColumnMajor,
cutlass::layout::Matrix::kRowMajor
};
cutlass::Coord<2> bounds = cutlass::make_Coord(M - 2, N - 2);
for (int i = 0; i < 2; ++i) {
int matrix_data[M * N] = { 0 };
int row_stride;
int col_stride;
if (layouts[i] == cutlass::layout::Matrix::kColumnMajor) {
row_stride = 1;
col_stride = M;
}
else {
row_stride = N;
col_stride = 1;
}
// Use the packed() helper to construct the stride vector for the chosen layout
ContiguousTensorView view(
matrix_data,
cutlass::layout::ContiguousMatrix::packed(cutlass::make_Coord(M, N), layouts[i]),
bounds);
ASSERT_TRUE(view.good());
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
cutlass::Coord<2> coord = cutlass::make_Coord(m, n);
if (view.contains(coord)) {
view.at(coord) = m * N + n;
}
}
}
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
int expected = 0;
if (m < bounds[0] && n < bounds[1]) {
expected = int(m * N + n);
}
EXPECT_EQ(matrix_data[m * row_stride + n * col_stride], expected);
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Uncomment the following line to observe output from printing TensorView objects
//
// #define OBSERVE_TENSORVIEW_IO // uncomment to enable printing
#ifdef OBSERVE_TENSORVIEW_IO
// This test constructs a TensorView of rank=2 with matrix layouts known at runtime. This
// uses TensorRefMapFunc classes defined in cutlass/matrix_traits.h to define the mapping
// from logical tensor indices to storage in memory.
//
// Helpers in tools/util/tensor_view_io.h print both the logical TensorView and the
// linear memory of the tensor.
TEST(TensorView, contiguous) {
int const M = 8;
int const N = 16;
typedef cutlass::TensorView<
int32_t,
cutlass::layout::ContiguousLayout> ContiguousTensorView;
cutlass::layout::Matrix layouts[] = {
cutlass::layout::Matrix::kColumnMajor,
cutlass::layout::Matrix::kRowMajor
};
cutlass::Coord<2> bounds = cutlass::make_Coord(M, N);
for (int i = 0; i < 2; ++i) {
int matrix_data[M * N] = { 0 };
int ldm;
int row_stride;
int col_stride;
if (layouts[i] == cutlass::layout::Matrix::kColumnMajor) {
row_stride = 1;
col_stride = M;
ldm = col_stride;
}
else {
row_stride = N;
col_stride = 1;
ldm = row_stride;
}
// Use helper to determine stride vector from leading dimension
ContiguousTensorView view(
matrix_data,
cutlass::layout::ContiguousLayout::stride(layouts[i], ldm),
bounds);
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
cutlass::Coord<2> coord = cutlass::make_Coord(m, n);
if (view.contains(coord)) {
view.at(coord) = m * N + n;
}
}
}
std::cout << "---------\n";
std::cout << (layouts[i] == cutlass::layout::Matrix::kColumnMajor ?
"Column-major:" : "Row-major:") << "\n\n";
std::cout << "Logical view:\n";
std::cout.width(4);
std::cout << view << "\n" << std::endl; // Print TensorView object.
std::cout << "Linear memory:";
for (int idx = 0; idx < view.capacity(); ++idx) {
if (!(idx % (layouts[i] == cutlass::layout::Matrix::kColumnMajor ? M : N))) {
std::cout << std::endl;
}
std::cout << std::setw(4) << view.at(idx) << " ";
}
std::cout << "\n" << std::endl;
}
}
// This test is similar to the previous except it uses a column-major, interleaved data
// layout. The test prints both the logical representation (a typical column-major matrix)
// and a representation of linear memory.
//
// Note that the interleave=4 structure implies that each group of four consecutive
// elements in a row is adjacent in memory, followed by the corresponding elements of the next row.
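// For example, with M = 16 rows and kInterleave = 4, the packed
// ColumnMajorInterleaved<4> mapping places element (m, n) at offset
//   (n / 4) * (M * 4) + m * 4 + (n % 4)
// (assuming the packed stride of M * kInterleave), so columns 0..3 of row m
// occupy four consecutive locations before row m+1 begins.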
TEST(TensorView, rank2_column_major_interleaved) {
int const M = 16;
int const N = 16;
int const kInterleave = 4;
int matrix_data[M * N] = {0};
cutlass::Coord<2> bounds = cutlass::make_Coord(M, N);
// Define the TensorRefMapFunc for a column-major interleaved matrix format
typedef cutlass::layout::ColumnMajorInterleaved<kInterleave> TensorRefMapFunc;
// Define a TensorView of rank=2 using the column-major interleaved mapping function
typedef cutlass::TensorView<
int,
TensorRefMapFunc> InterleavedTensorView;
InterleavedTensorView view(
matrix_data,
TensorRefMapFunc::stride(M),
bounds);
// Initialize
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
view.at(cutlass::make_Coord(m, n)) = m + n * M;
}
}
// Print logical view
std::cout << "Column-major, interleave=" << kInterleave << " (logical view):\n";
std::cout << std::setw(4) << view << "\n" << std::endl;
// Now define a linear view of the same data in memory
typedef cutlass::TensorView<int, 2, cutlass::layout::RowMajor> LinearTensorView;
LinearTensorView linear_view(matrix_data, cutlass::make_Coord(N), bounds);
std::cout << "Linear view in memory:\n";
std::cout << std::setw(4) << linear_view << std::endl;
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(TensorView, int4) {
int const M = 4;
int const N = 8;
using T = cutlass::int4b_t;
cutlass::HostTensor<T, cutlass::layout::RowMajor> tensor({M, N});
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
T x = T(n ^ m); // some simple hash
tensor.host_view().at({m, n}) = x;
}
}
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
int x = (n ^ m); // some simple hash
EXPECT_TRUE(int(tensor.host_view().at({m, n})) == x);
}
}
EXPECT_EQ(tensor.size(), M * N);
}
TEST(TensorView, uint4) {
int const M = 4;
int const N = 8;
using T = cutlass::uint4b_t;
cutlass::HostTensor<T, cutlass::layout::RowMajor> tensor({M, N});
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
T x = T(n ^ m); // some simple hash
tensor.host_view().at({m, n}) = x;
}
}
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
int x = (n ^ m); // some simple hash
EXPECT_TRUE(int(tensor.host_view().at({m, n})) == x);
}
}
EXPECT_EQ(tensor.size(), M * N);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/core/tensor_view.cu/0 | {
"file_path": "test/unit/core/tensor_view.cu",
"repo_id": "test",
"token_count": 3276
} | 49 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cute/tensor.hpp>
using namespace cute;
template <class Layout>
void
test_coalesce(Layout const& layout)
{
auto coalesce_layout = coalesce(layout);
CUTLASS_TRACE_HOST(shape (layout) << " => " << shape (coalesce_layout));
CUTLASS_TRACE_HOST(stride(layout) << " " << stride(coalesce_layout));
CUTE_STATIC_ASSERT_V(depth(coalesce_layout) <= Int<1>{});
ASSERT_EQ(size(coalesce_layout), size(layout));
for (int i = 0; i < size(layout); ++i) {
EXPECT_EQ(coalesce_layout(i), layout(i));
}
}
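// For example, coalesce(Layout<Shape<_2,_4>, Stride<_1,_2>>{}) flattens to the
// rank-1 layout 8:1, because the second mode begins exactly where the first
// mode's footprint ends; test_coalesce verifies that every linear index maps
// to the same offset before and after coalescing.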
TEST(CuTe_core, Coalesce)
{
{
auto layout = make_layout(Int<1>{}, Int<0>{});
test_coalesce(layout);
}
{
auto layout = make_layout(Int<1>{}, Int<1>{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}, Int<6>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape (Int<2>{}, Int<1>{}, Int<6>{}),
make_stride(Int<1>{}, Int<6>{}, Int<2>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape (Int<2>{}, Int<1>{}, Int<6>{}),
make_stride(Int<1>{}, 7, Int<2>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape (Int<2>{}, Int<1>{}, Int<6>{}),
make_stride(Int<4>{}, 7, Int<8>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(2, Int<4>{}, Int<6>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, 4, Int<6>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}, 6));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}, Int<6>{}), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(2, Int<4>{}, Int<6>{}), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, 4, Int<6>{}), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<4>{}, 6), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, Int<1>{}, Int<3>{}), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, 1, Int<3>{}), GenRowMajor{});
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, 1, Int<3>{}), make_stride(Int<2>{}, 4, Int<4>{}));
test_coalesce(layout);
}
{
auto layout = make_layout(make_shape(Int<2>{}, 1, Int<3>{}), make_stride(Int<2>{}, Int<0>{}, Int<4>{}));
test_coalesce(layout);
}
{
auto layout = Layout<Shape<Shape<_2,_2>,Shape<_2, _2>>,
Stride<Stride<_1,_4>,Stride<_8,_32>>>{};
test_coalesce(layout);
}
}
| test/unit/cute/core/coalesce.cpp/0 | {
"file_path": "test/unit/cute/core/coalesce.cpp",
"repo_id": "test",
"token_count": 1874
} | 50 |
/***************************************************************************************************
* Copyright (c) 2024 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <cassert>
#include <cstdint>
#include <tuple>
#include <cute/container/tuple.hpp>
#include <cute/container/packed_tuple.hpp>
#include <cute/algorithm/tuple_algorithms.hpp>
#include <cute/tensor.hpp>
namespace pt_test {
template <class T>
struct Nonempty {
T datum;
Nonempty(T const& t) : datum{t} {}
friend bool operator==(Nonempty<T> const& lhs, Nonempty<T> const& rhs) {
return lhs.datum == rhs.datum;
}
friend bool operator!=(Nonempty<T> const& lhs, Nonempty<T> const& rhs) {
return !(lhs == rhs);
}
};
template <int V>
struct Empty {
template <int W>
friend bool operator==(Empty<V> const&, Empty<W> const&) {
return V == W;
}
template <int W>
friend bool operator!=(Empty<V> const& lhs, Empty<W> const& rhs) {
return !(lhs == rhs);
}
};
// std::tuple
static_assert(cute::is_standard_layout_v<std::tuple<>>); // it happens to be
static_assert(cute::is_standard_layout_v<std::tuple<int>>); // it happens to be
static_assert(cute::is_standard_layout_v<std::tuple<double>>); // it happens to be
static_assert(not cute::is_standard_layout_v<std::tuple<int, double>>); // it's not
#if ! defined(CUTLASS_USE_PACKED_TUPLE)
// cute::tuple
static_assert(cute::is_standard_layout_v<cute::tuple<>>); // it happens to be
static_assert(cute::is_standard_layout_v<cute::tuple<int>>); // it happens to be
static_assert(cute::is_standard_layout_v<cute::tuple<double>>); // it happens to be
static_assert(not cute::is_standard_layout_v<cute::tuple<int, double>>); // it's not
#endif // CUTLASS_USE_PACKED_TUPLE
// cute::packed_tuple
static_assert(cute::is_standard_layout_v<cute::packed_tuple<>>);
static_assert(cute::is_standard_layout_v<cute::packed_tuple<int>>);
static_assert(cute::is_standard_layout_v<cute::packed_tuple<double>>);
static_assert(cute::is_standard_layout_v<cute::packed_tuple<int, double>>); // it is
static_assert(cute::is_standard_layout_v<cute::packed_tuple<int, int, int, int>>); // it is
static_assert(cute::is_standard_layout_v<cute::packed_tuple<int, cute::packed_tuple<int, int>, int>>); // it is
static_assert(cute::is_standard_layout_v<cute::packed_tuple<int, cute::packed_tuple<Empty<0>, Empty<0>>, int>>); // it is
//////////////////////////////////////////////////////////////////////
// packed_tuple test starts here
//////////////////////////////////////////////////////////////////////
template <
class ExpectedPackedType,
size_t ExpectedPackedSize,
class ... Args>
constexpr void
test_packed_type_alias([[maybe_unused]] ExpectedPackedType packed, std::tuple<Args...> unpacked)
{
using cute::packed_tuple;
if constexpr ((cute::is_standard_layout_v<Args> && ...)) {
static_assert(cute::is_standard_layout_v<packed_tuple<Args...>>);
}
if constexpr ((cute::is_empty_v<Args> && ...)) {
static_assert(cute::is_empty_v<packed_tuple<Args...>>);
}
static_assert(cute::tuple_size_v<packed_tuple<Args...>> == sizeof...(Args));
auto test_element = [unpacked] (auto index) {
static_assert(cute::is_same_v<
std::tuple_element_t<index, packed_tuple<Args...>>,
std::tuple_element_t<index, std::tuple<Args...>>
>);
packed_tuple<Args...> sl = cute::apply(unpacked, [](auto... a){ return cute::make_packed_tuple(a...); });
EXPECT_EQ(std::get<index>(unpacked), cute::get<index>(sl));
};
cute::for_each(std::make_index_sequence<sizeof...(Args)>(), test_element);
}
void test_packed_type_aliases() {
using cute::packed_tuple;
test_packed_type_alias<packed_tuple<>, 0>({}, {});
test_packed_type_alias<packed_tuple<int>, 1, int>({7}, {7});
test_packed_type_alias<packed_tuple<double>, 1, double>({1.5}, {1.5});
// Make sure that class types are handled the same as scalar types
test_packed_type_alias<packed_tuple<Nonempty<int>>, 1, Nonempty<int>>(
{Nonempty{7}}, {Nonempty{7}});
test_packed_type_alias<packed_tuple<Nonempty<double>>, 1, Nonempty<double>>(
{Nonempty{1.5}}, {Nonempty{1.5}});
test_packed_type_alias<packed_tuple<>, 0, Empty<0>>({}, {});
test_packed_type_alias<packed_tuple<>, 0, Empty<0>, Empty<1>>(
{}, {Empty<0>{}, Empty<1>{}});
test_packed_type_alias<packed_tuple<>, 0, Empty<0>, Empty<1>, Empty<2>>(
{}, {Empty<0>{}, Empty<1>{}, Empty<2>{}});
test_packed_type_alias<packed_tuple<int>, 1, Empty<0>, int>(
{7}, {Empty<0>{}, 7});
test_packed_type_alias<packed_tuple<int>, 1, int, Empty<0>>(
{7}, {7, Empty<0>{}});
test_packed_type_alias<packed_tuple<int>, 1, int, Empty<0>, Empty<1>>(
{7}, {7, Empty<0>{}, Empty<1>{}});
test_packed_type_alias<packed_tuple<int>, 1, Empty<0>, int, Empty<1>>(
{7}, {Empty<0>{}, 7, Empty<1>{}});
test_packed_type_alias<packed_tuple<int>, 1, Empty<0>, Empty<1>, int>(
{7}, {Empty<0>{}, Empty<1>{}, 7});
test_packed_type_alias<packed_tuple<int, double>, 2, int, double, Empty<0>>(
{7, 1.5}, {7, 1.5, Empty<0>{}});
test_packed_type_alias<packed_tuple<int, double>, 2, int, Empty<0>, double>(
{7, 1.5}, {7, Empty<0>{}, 1.5});
test_packed_type_alias<packed_tuple<int, double>, 2, int, double, Empty<0>>(
{7, 1.5}, {7, 1.5, Empty<0>{}});
test_packed_type_alias<packed_tuple<int, double>, 2, int, double, Empty<0>, Empty<1>>(
{7, 1.5}, {7, 1.5, Empty<0>{}, Empty<1>{}});
test_packed_type_alias<packed_tuple<int, double>, 2, int, Empty<0>, double, Empty<1>>(
{7, 1.5}, {7, Empty<0>{}, 1.5, Empty<1>{}});
test_packed_type_alias<packed_tuple<int, double>, 2, int, Empty<0>, Empty<1>, double>(
{7, 1.5}, {7, Empty<0>{}, Empty<1>{}, 1.5});
test_packed_type_alias<packed_tuple<int, double>, 2, Empty<0>, int, Empty<1>, double>(
{7, 1.5}, {Empty<0>{}, 7, Empty<1>{}, 1.5});
test_packed_type_alias<packed_tuple<int, double>, 2, Empty<0>, Empty<1>, int, double>(
{7, 1.5}, {Empty<0>{}, Empty<1>{}, 7, 1.5});
test_packed_type_alias<packed_tuple<int, double, float>, 3, Empty<0>, int, double, float>(
{7, 1.5, 2.5f}, {Empty<0>{}, 7, 1.5, 2.5f});
test_packed_type_alias<packed_tuple<int, double, float>, 3, int, Empty<0>, double, float>(
{7, 1.5, 2.5f}, {7, Empty<0>{}, 1.5, 2.5f});
test_packed_type_alias<packed_tuple<int, double, float>, 3, int, double, Empty<0>, float>(
{7, 1.5, 2.5f}, {7, 1.5, Empty<0>{}, 2.5f});
test_packed_type_alias<packed_tuple<int, double, float>, 3, int, double, float, Empty<0>>(
{7, 1.5, 2.5f}, {7, 1.5, 2.5f, Empty<0>{}});
}
template <class Tuple, size_t Which, class ExpectedElementType>
constexpr bool test_tuple_element() {
return cute::is_same_v<std::tuple_element_t<Which, Tuple>, ExpectedElementType>;
}
void test_tuple_elements() {
using cute::packed_tuple;
static_assert(test_tuple_element<std::tuple<Empty<0>>, 0, Empty<0>>());
static_assert(test_tuple_element<packed_tuple<Empty<0>>, 0, Empty<0>>());
}
// A default-constructible type.
template <size_t Value>
struct DefaultConstructible {};
void test_default_constructibility() {
using cute::packed_tuple;
{
[[maybe_unused]] packed_tuple<> t_p_0;
[[maybe_unused]] packed_tuple<DefaultConstructible<0>> t_p_1;
[[maybe_unused]] packed_tuple<DefaultConstructible<0>, DefaultConstructible<1>> t_p_2;
[[maybe_unused]] packed_tuple<DefaultConstructible<0>, int, DefaultConstructible<1>> t_p_3;
}
}
void test_sizes_and_not_storing_empty_types() {
using cute::packed_tuple;
[[maybe_unused]] packed_tuple<
int,
pt_test::Empty<0>,
double
> pt{42, pt_test::Empty<0>{}, 1.5};
static_assert(cute::is_standard_layout_v<decltype(pt)>);
// packed_result_type must only store the packed tuple,
// and not the integer_sequence(s) used to access it.
// The latter can be represented entirely at compile time as types.
struct { int i; double j; } IntDouble;
static_assert(sizeof(pt) == sizeof(IntDouble));
EXPECT_EQ(cute::get<0>(pt), 42);
EXPECT_EQ(cute::get<1>(pt), pt_test::Empty<0>{});
EXPECT_EQ(cute::get<2>(pt), 1.5);
packed_tuple<
pt_test::Empty<0>,
pt_test::Empty<1>,
packed_tuple<
pt_test::Empty<0>,
pt_test::Empty<1>,
packed_tuple<pt_test::Empty<0>, packed_tuple<>>
>
> pt_empty{};
static_assert(cute::is_empty_v<decltype(pt_empty)>);
static_assert(cute::is_standard_layout_v<decltype(pt_empty)>);
static_assert(sizeof(pt_empty) == 1);
// Template arguments must be default constructible,
// and packed_tuple itself needs a default constructor.
[[maybe_unused]] packed_tuple<
packed_tuple<int, pt_test::Empty<2>>,
double,
pt_test::Empty<3>> pt2;
static_assert(cute::is_standard_layout_v<decltype(pt2)>);
// cute::packed_tuple, like the original cute::tuple, does not
// promise to have working CTAD (class template argument
// deduction).
[[maybe_unused]] packed_tuple<
packed_tuple<int, pt_test::Empty<0>>,
pt_test::Empty<1>
> pt3{
packed_tuple<int, pt_test::Empty<0>>{42, pt_test::Empty<0>{}},
pt_test::Empty<1>{}
};
static_assert(cute::is_standard_layout_v<decltype(pt3)>);
static_assert(cute::is_same_v<
cute::tuple_element_t<0, decltype(pt3)>,
packed_tuple<int, pt_test::Empty<0>>>);
static_assert(cute::is_same_v<
cute::tuple_element_t<1, decltype(pt3)>,
pt_test::Empty<1>>);
static_assert(cute::tuple_size_v<cute::tuple_element_t<0, decltype(pt3)>> == 2u);
packed_tuple<int, pt_test::Empty<0>> pt3_0 = cute::get<0>(pt3);
auto pt3_0_1 = cute::get<1>(pt3_0);
static_assert(cute::is_same_v<decltype(pt3_0_1), pt_test::Empty<0>>);
EXPECT_EQ(cute::get<0>(cute::get<0>(pt3)), 42);
EXPECT_EQ(cute::get<1>(cute::get<0>(pt3)), pt_test::Empty<0>{});
}
} // namespace pt_test
TEST(CuTe_core, PackedTuple2)
{
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("packed_tuple");
CUTLASS_TRACE_HOST("-------------------------------");
pt_test::test_packed_type_aliases();
pt_test::test_tuple_elements();
pt_test::test_default_constructibility();
pt_test::test_sizes_and_not_storing_empty_types();
}
TEST(CuTe_core, PackedTuple2Get) {
using cute::packed_tuple;
using pt_test::Empty;
using pt_test::Nonempty;
{
using tuple_type = packed_tuple<int>;
tuple_type pt{42};
static_assert(cute::tuple_size_v<tuple_type> == 1u);
static_assert(cute::is_same_v<cute::tuple_element_t<0, tuple_type>, int>);
EXPECT_EQ(cute::get<0>(pt), 42);
cute::get<0>(pt) = 43;
EXPECT_EQ(cute::get<0>(pt), 43);
}
{
using tuple_type = packed_tuple<int>;
tuple_type const pt{42};
EXPECT_EQ(cute::get<0>(pt), 42);
static_assert(cute::is_same_v<decltype(cute::get<0>(pt)), int const&>);
}
{
EXPECT_EQ(cute::get<0>(packed_tuple<int>{42}), 42);
}
{
using tuple_type = packed_tuple<pt_test::Empty<0>>;
tuple_type pt;
static_assert(cute::tuple_size_v<tuple_type> == 1u);
static_assert(cute::is_same_v<cute::tuple_element_t<0, tuple_type>, pt_test::Empty<0>>);
EXPECT_EQ(cute::get<0>(pt), pt_test::Empty<0>{});
}
{
using tuple_type = packed_tuple<pt_test::Empty<0>>;
tuple_type const pt;
EXPECT_EQ(cute::get<0>(pt), pt_test::Empty<0>{});
}
{
using tuple_type = packed_tuple<pt_test::Empty<0>>;
EXPECT_EQ(cute::get<0>(tuple_type{}), pt_test::Empty<0>{});
}
{
using tuple_type = packed_tuple<int, double>;
tuple_type pt{1, 2.5};
static_assert(cute::tuple_size_v<tuple_type> == 2u);
static_assert(cute::is_same_v<cute::tuple_element_t<0, tuple_type>, int>);
static_assert(cute::is_same_v<cute::tuple_element_t<1, tuple_type>, double>);
EXPECT_EQ(cute::get<0>(pt), 1);
cute::get<0>(pt) = 2;
EXPECT_EQ(cute::get<0>(pt), 2);
EXPECT_EQ(cute::get<1>(pt), 2.5);
cute::get<1>(pt) = 3.5;
EXPECT_EQ(cute::get<1>(pt), 3.5);
}
{
using tuple_type = packed_tuple<int, double>;
tuple_type const pt{1, 2.5};
EXPECT_EQ(cute::get<0>(pt), 1);
static_assert(cute::is_same_v<decltype(cute::get<0>(pt)), int const&>);
EXPECT_EQ(cute::get<1>(pt), 2.5);
static_assert(cute::is_same_v<decltype(cute::get<1>(pt)), double const&>);
}
{
using tuple_type = packed_tuple<int, double>;
EXPECT_EQ(cute::get<0>(tuple_type{1, 2.5}), 1);
EXPECT_EQ(cute::get<1>(tuple_type{1, 2.5}), 2.5);
}
{
using tuple_type = packed_tuple<Empty<0>, double>;
tuple_type pt{Empty<0>{}, 2.5};
static_assert(cute::tuple_size_v<tuple_type> == 2u);
static_assert(cute::is_same_v<cute::tuple_element_t<0, tuple_type>, Empty<0>>);
static_assert(cute::is_same_v<cute::tuple_element_t<1, tuple_type>, double>);
EXPECT_EQ(cute::get<0>(pt), Empty<0>{});
EXPECT_EQ(cute::get<1>(pt), 2.5);
cute::get<1>(pt) = 3.5;
EXPECT_EQ(cute::get<1>(pt), 3.5);
}
{
using tuple_type = packed_tuple<Empty<0>, double>;
tuple_type const pt{Empty<0>{}, 2.5};
EXPECT_EQ(cute::get<0>(pt), Empty<0>{});
static_assert(cute::is_same_v<decltype(cute::get<0>(pt)), Empty<0>>);
EXPECT_EQ(cute::get<1>(pt), 2.5);
static_assert(cute::is_same_v<decltype(cute::get<1>(pt)), double const&>);
}
{
using tuple_type = packed_tuple<Empty<0>, double>;
EXPECT_EQ(cute::get<0>(tuple_type{Empty<0>{}, 2.5}), Empty<0>{});
EXPECT_EQ(cute::get<1>(tuple_type{Empty<0>{}, 2.5}), 2.5);
}
{
using tuple_type = packed_tuple<int, double, Nonempty<float>>;
tuple_type pt{1, 2.5, Nonempty{3.25f}};
static_assert(cute::tuple_size_v<tuple_type> == 3u);
static_assert(cute::is_same_v<cute::tuple_element_t<0, tuple_type>, int>);
static_assert(cute::is_same_v<cute::tuple_element_t<1, tuple_type>, double>);
static_assert(cute::is_same_v<cute::tuple_element_t<2, tuple_type>, Nonempty<float>>);
EXPECT_EQ(cute::get<0>(pt), 1);
EXPECT_EQ(cute::get<1>(pt), 2.5);
EXPECT_EQ(cute::get<2>(pt), Nonempty{3.25f});
cute::get<0>(pt) = 42;
EXPECT_EQ(cute::get<0>(pt), 42);
cute::get<1>(pt) = 4.5;
EXPECT_EQ(cute::get<1>(pt), 4.5);
cute::get<2>(pt) = Nonempty<float>{3.75f};
EXPECT_EQ(cute::get<2>(pt), Nonempty<float>{3.75f});
}
{
using tuple_type = packed_tuple<int, double, Nonempty<float>>;
tuple_type const pt{1, 2.5, Nonempty{3.25f}};
EXPECT_EQ(cute::get<0>(pt), 1);
EXPECT_EQ(cute::get<1>(pt), 2.5);
EXPECT_EQ(cute::get<2>(pt), Nonempty{3.25f});
}
{
using tuple_type = packed_tuple<int, double, Nonempty<float>>;
EXPECT_EQ((cute::get<0>(tuple_type{1, 2.5, Nonempty{3.25f}})), 1);
EXPECT_EQ((cute::get<1>(tuple_type{1, 2.5, Nonempty{3.25f}})), 2.5);
EXPECT_EQ((cute::get<2>(tuple_type{1, 2.5, Nonempty{3.25f}})), Nonempty{3.25f});
}
{
using tuple_type = packed_tuple<int, Empty<0>, Nonempty<float>>;
packed_tuple<int, Empty<0>, Nonempty<float>> pt{1, Empty<0>{}, Nonempty{3.25f}};
static_assert(cute::tuple_size_v<tuple_type> == 3u);
static_assert(cute::is_same_v<cute::tuple_element_t<0, tuple_type>, int>);
static_assert(cute::is_same_v<cute::tuple_element_t<1, tuple_type>, Empty<0>>);
static_assert(cute::is_same_v<cute::tuple_element_t<2, tuple_type>, Nonempty<float>>);
EXPECT_EQ(cute::get<0>(pt), 1);
EXPECT_EQ(cute::get<1>(pt), Empty<0>{});
EXPECT_EQ(cute::get<2>(pt), Nonempty{3.25f});
cute::get<0>(pt) = 42;
EXPECT_EQ(cute::get<0>(pt), 42);
cute::get<2>(pt) = Nonempty<float>{3.75f};
EXPECT_EQ(cute::get<2>(pt), Nonempty<float>{3.75f});
}
{
using tuple_type = packed_tuple<int, Empty<0>, Nonempty<float>>;
tuple_type const pt{1, Empty<0>{}, Nonempty{3.25f}};
EXPECT_EQ(cute::get<0>(pt), 1);
EXPECT_EQ(cute::get<1>(pt), Empty<0>{});
EXPECT_EQ(cute::get<2>(pt), Nonempty{3.25f});
}
{
using tuple_type = packed_tuple<int, Empty<0>, Nonempty<float>>;
EXPECT_EQ((cute::get<0>(tuple_type{1, Empty<0>{}, Nonempty{3.25f}})), 1);
EXPECT_EQ((cute::get<1>(tuple_type{1, Empty<0>{}, Nonempty{3.25f}})), Empty<0>{});
EXPECT_EQ((cute::get<2>(tuple_type{1, Empty<0>{}, Nonempty{3.25f}})), Nonempty{3.25f});
}
}
namespace pt_test {
// An empty class type to which Empty is convertible.
template<int Value>
struct ConvertibleFromEmpty {
constexpr ConvertibleFromEmpty() = default;
constexpr ConvertibleFromEmpty(Empty<Value>) {}
template <int OtherValue>
friend constexpr bool operator==(ConvertibleFromEmpty<Value> const&, ConvertibleFromEmpty<OtherValue> const&) {
return Value == OtherValue;
}
template <int OtherValue>
friend constexpr bool operator!=(ConvertibleFromEmpty<Value> const& lhs, ConvertibleFromEmpty<OtherValue> const& rhs) {
return !(lhs == rhs);
}
};
} // end namespace pt_test
TEST(CuTe_core, PackedTupleConstexprDefaultConstruction) {
// Make sure that packed_tuple's default constructor is constexpr.
// MSVC makes this a bit more challenging than usual.
using pt_test::Empty;
{
[[maybe_unused]] constexpr cute::detail::ESO_t<Empty<0>> eso1{};
[[maybe_unused]] constexpr cute::detail::ESO_t<int64_t> eso2{};
}
{
[[maybe_unused]] constexpr cute::detail::ESO_t<Empty<0>, Empty<1>> eso0{};
[[maybe_unused]] constexpr cute::detail::ESO_t<int64_t, Empty<1>> eso1{};
[[maybe_unused]] constexpr cute::detail::ESO_t<Empty<0>, int64_t> eso2{};
[[maybe_unused]] constexpr cute::detail::ESO_t<int64_t, int64_t> eso3{};
}
}
TEST(CuTe_core, PackedTupleConvertingConstruction) {
using cute::packed_tuple;
using pt_test::ConvertibleFromEmpty;
using pt_test::Empty;
using pt_test::Nonempty;
{
using tuple_type = cute::tuple<Nonempty<int>>;
[[maybe_unused]] tuple_type t(7);
EXPECT_EQ(cute::get<0>(t), Nonempty<int>(7));
}
{
using tuple_type = packed_tuple<Nonempty<int>>;
[[maybe_unused]] tuple_type t(7);
EXPECT_EQ(cute::get<0>(t), Nonempty<int>(7));
}
{
using tuple_type = cute::tuple<ConvertibleFromEmpty<0>>;
[[maybe_unused]] tuple_type t(Empty<0>{});
EXPECT_EQ(cute::get<0>(t), ConvertibleFromEmpty<0>{});
}
{
using tuple_type = packed_tuple<ConvertibleFromEmpty<0>>;
[[maybe_unused]] tuple_type t(Empty<0>{});
EXPECT_EQ(cute::get<0>(t), ConvertibleFromEmpty<0>{});
}
{
using tuple_type = cute::tuple<float, Nonempty<int>>;
[[maybe_unused]] tuple_type t(1.5f, 7);
EXPECT_EQ(cute::get<0>(t), 1.5f);
EXPECT_EQ(cute::get<1>(t), Nonempty<int>(7));
}
{
using tuple_type = packed_tuple<float, Nonempty<int>>;
[[maybe_unused]] tuple_type t(1.5f, 7);
EXPECT_EQ(cute::get<0>(t), 1.5f);
EXPECT_EQ(cute::get<1>(t), Nonempty<int>(7));
}
{
using tuple_type = cute::tuple<Empty<0>, Nonempty<int>>;
[[maybe_unused]] tuple_type t(Empty<0>{}, 7);
EXPECT_EQ(cute::get<0>(t), Empty<0>{});
EXPECT_EQ(cute::get<1>(t), Nonempty<int>(7));
}
{
using tuple_type = packed_tuple<Empty<0>, Nonempty<int>>;
[[maybe_unused]] tuple_type t(Empty<0>{}, 7);
EXPECT_EQ(cute::get<0>(t), Empty<0>{});
EXPECT_EQ(cute::get<1>(t), Nonempty<int>(7));
}
{
using tuple_type = cute::tuple<ConvertibleFromEmpty<0>, Nonempty<int>>;
[[maybe_unused]] tuple_type t(Empty<0>{}, 7);
EXPECT_EQ(cute::get<0>(t), ConvertibleFromEmpty<0>{});
EXPECT_EQ(cute::get<1>(t), Nonempty<int>(7));
}
{
using tuple_type = packed_tuple<ConvertibleFromEmpty<0>, Nonempty<int>>;
[[maybe_unused]] tuple_type t(Empty<0>{}, 7);
EXPECT_EQ(cute::get<0>(t), ConvertibleFromEmpty<0>{});
EXPECT_EQ(cute::get<1>(t), Nonempty<int>(7));
}
{
using inner_tuple_type = cute::tuple<Empty<0>>;
using outer_tuple_type = cute::tuple<inner_tuple_type>;
[[maybe_unused]] outer_tuple_type t(inner_tuple_type{Empty<0>{}});
}
{
using inner_tuple_type = packed_tuple<Empty<0>>;
using outer_tuple_type = packed_tuple<inner_tuple_type>;
[[maybe_unused]] outer_tuple_type t(inner_tuple_type{Empty<0>{}});
}
{
using inner_tuple_type = cute::tuple<ConvertibleFromEmpty<0>>;
using outer_tuple_type = cute::tuple<inner_tuple_type>;
[[maybe_unused]] outer_tuple_type t(inner_tuple_type{Empty<0>{}});
}
{
using inner_tuple_type = packed_tuple<ConvertibleFromEmpty<0>>;
using outer_tuple_type = packed_tuple<inner_tuple_type>;
[[maybe_unused]] outer_tuple_type t(inner_tuple_type{Empty<0>{}});
}
}
| test/unit/cute/core/packed_tuple.cpp/0 | {
"file_path": "test/unit/cute/core/packed_tuple.cpp",
"repo_id": "test",
"token_count": 9311
} | 51 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass_unit_test.h"
#include <iostream>
#include <cstdint>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
namespace cutlass::test {
template <class ElementType, class SmemLayout>
struct SharedStorage
{
cute::ArrayEngine<ElementType, cute::cosize_v<SmemLayout>> smem;
};
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
template <class T, class TiledCopy, class CTA_Tiler, class GmemLayout, class SmemLayout>
__global__ void
tma_test_device_cute(T const* g_in, T* g_out,
CUTE_GRID_CONSTANT TiledCopy const tma, CTA_Tiler cta_tiler,
GmemLayout gmem_layout, SmemLayout smem_layout)
{
using namespace cute;
CUTE_STATIC_ASSERT_V(product_each(shape(cta_tiler)) == product_each(shape(smem_layout)));
// Use Shared Storage structure to allocate and distribute aligned SMEM addresses
extern __shared__ char shared_memory[];
using SharedStorage = SharedStorage<T, SmemLayout>;
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory);
// Construct SMEM tensor
Tensor sB = make_tensor(make_smem_ptr(shared_storage.smem.begin()), smem_layout); // (CTA_TILE_M,CTA_TILE_N,...)
// TMA requires special handling of strides to deal with coord codomain mapping
// Represent the full tensors -- get these from TMA
Tensor mA = make_tensor(make_gmem_ptr<T>(g_in), gmem_layout);
Tensor mB = tma.get_tma_tensor(shape(gmem_layout));
constexpr int R = rank_v<CTA_Tiler>;
Tensor gA = flat_divide(mA, cta_tiler); // (CTA_TILE_M,CTA_TILE_N,...REST_M,REST_N,...)
Tensor gB = flat_divide(mB, cta_tiler); // (CTA_TILE_M,CTA_TILE_N,...REST_M,REST_N,...)
//
// Prepare the TMA_STORE
//
auto cta_tma = tma.get_slice(Int<0>{}); // CTA slice
Tensor tBsB_x = cta_tma.partition_S(sB); // (TMA,TMA_M,TMA_N)
Tensor tBgB_x = cta_tma.partition_D(gB); // (TMA,TMA_M,TMA_N,REST_M,REST_N)
#if 0
if (thread0()) {
print(tma);
print("TILE : "); print(cta_tiler); print("\n");
print(" mB : "); print( mB.data()); print(" o "); print( mB.layout()); print("\n");
print(" gB : "); print( gB.data()); print(" o "); print( gB.layout()); print("\n");
print("tBgB_x: "); print(tBgB_x.data()); print(" o "); print(tBgB_x.layout()); print("\n");
print(" sB : "); print( sB.data()); print(" o "); print( sB.layout()); print("\n");
print("tBsB_x: "); print(tBsB_x.data()); print(" o "); print(tBsB_x.layout()); print("\n");
}
#endif
//
// Perform the TMA_STORE
//
// INPUT: Group the CTA_TILE_X modes and REST_X modes for input
Tensor tAgA = group_modes<0,R>(group_modes<R,rank(gA)>(gA)); // (CTA_TILE, REST)
// OUTPUT: Group the REST_X modes and the TMA_X modes to easily iterate through the tiles
Tensor tBgB = group_modes<1,rank(tBgB_x)>(tBgB_x); // (TMA,REST)
Tensor tBsB = group_modes<1,rank(tBsB_x)>(tBsB_x); // (TMA,REST)
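  // The CTA's SMEM tile holds exactly one TMA box per stage, so its grouped REST mode must be trivial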
static_assert(size<1>(tBsB) == 1);
#if 0
if (thread0()) {
print("tAgA : "); print(tAgA.data()); print(" o "); print(tAgA.layout()); print("\n");
print("tBsB : "); print(tBsB.data()); print(" o "); print(tBsB.layout()); print("\n");
print("tBgB : "); print(tBgB.data()); print(" o "); print(tBgB.layout()); print("\n");
}
#endif
// Test L2 prefetch
cooperative_prefetch<128>(threadIdx.x, gA);
// Loop over the TMA stages, using smem as our buffer
for (int stage = 0; stage < size<1>(tBgB); ++stage)
{
//
    // Read the input tile in trivially: gmem -> smem
    //
    // Subbyte elements could cause race conditions, so copy with a single thread to be conservative
if (thread0()) {
copy(tAgA(_,stage), sB);
}
__syncthreads();
cute::cp_async_wait<0>();
//
// Perform the TMA_STORE
//
if (threadIdx.x == 0) {
copy(tma, tBsB(_,0), tBgB(_,stage));
}
tma_store_wait<0>();
__syncthreads();
}
}
template <class T, class TmaType = T, class CopyOp, class GMEM_Layout, class SMEM_Layout, class CTA_Tile>
void
test_tma_store(CopyOp const& copy_op,
GMEM_Layout const& gmem_layout,
SMEM_Layout const& smem_layout,
CTA_Tile const& cta_tile)
{
using namespace cute;
// Allocate and initialize host test data
size_t N = ceil_div(cosize(gmem_layout) * sizeof_bits<T>::value, 8);
thrust::host_vector<uint8_t> h_in(N);
for (size_t i = 0; i < h_in.size(); ++i) {
h_in[i] = uint8_t(i % 13);
}
Tensor hA_in = make_tensor(recast_ptr<T>(h_in.data()), gmem_layout);
// Allocate and initialize device test data
thrust::device_vector<uint8_t> d_in = h_in;
  thrust::device_vector<uint8_t> d_out(h_in.size(), uint8_t(-1)); // uint8_t(-1) wraps to 0xFF: fill the output with a sentinel value
// Create TMA for this device Tensor
Tensor gA = make_tensor(make_gmem_ptr<T>(raw_pointer_cast(d_out.data())), gmem_layout);
auto tma = make_tma_copy<TmaType>(copy_op, gA, smem_layout, cta_tile, Int<1>{});
//print(tma);
// Launch
int smem_size = int(sizeof(SharedStorage<T, decltype(smem_layout)>));
tma_test_device_cute<<<1, 128, smem_size>>>(
reinterpret_cast<T const*>(raw_pointer_cast(d_in.data())),
reinterpret_cast<T*> (raw_pointer_cast(d_out.data())),
tma, cta_tile,
gmem_layout,
smem_layout);
// Copy results back to host
thrust::host_vector<uint8_t> h_out = d_out;
Tensor hA_out = make_tensor(recast_ptr<T>(h_out.data()), gmem_layout);
// Validate the results. Print only the first 3 errors.
int count = 3;
for (int i = 0; i < int(size(hA_out)) && count > 0; ++i) {
EXPECT_EQ(hA_in(i), hA_out(i));
if (hA_in(i) != hA_out(i)) {
--count;
}
}
}
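// Example usage (illustrative sketch only -- the element type, layouts, and CTA tile
// below are assumptions rather than values taken from an existing test):
//
//   using namespace cute;
//   auto gmem_layout = make_layout(make_shape(256, 256), GenRowMajor{});
//   auto smem_layout = make_layout(make_shape(Int<64>{}, Int<64>{}), GenRowMajor{});
//   cutlass::test::test_tma_store<cutlass::half_t>(SM90_TMA_STORE{}, gmem_layout, smem_layout,
//                                                  product_each(shape(smem_layout)));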
#endif
} // end namespace cutlass::test
| test/unit/cute/hopper/tma_store_testbed.hpp/0 | {
"file_path": "test/unit/cute/hopper/tma_store_testbed.hpp",
"repo_id": "test",
"token_count": 3089
} | 52 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Unit tests for threadblock-level epilogues for Volta Tensor Op GEMMs
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/warp/mma_tensor_op_sm70.h"
#include "cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "testbed.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
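// Each test below follows the same pattern: define a Volta (SM70) warp-level Tensor Op
// matrix multiply, derive an output thread map and a LinearCombination output operator,
// instantiate the default threadblock epilogue for that configuration, and run it
// through EpilogueTestbed. Only the threadblock/warp shapes and element types vary.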
TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_64x64_32x32x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_128x64_64x32x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_64x128_32x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 128, 4>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_64x64_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_64x128_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 128, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_128x64_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_128x128_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 128, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_128x256_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 256, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_256x128_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<256, 128, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Mixed: F32 accumulation
//
/////////////////////////////////////////////////////////////////////////////////////////////////
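// In the tests below, accumulation and the epilogue computation are performed in F32
// (ElementAccumulator = ElementCompute = float) while the operands and output remain F16.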
TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_64x64_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_128x256_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 256, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_256x128_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<256, 128, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_128x128_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 128, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_64x64_32x32x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_64x128_32x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 128, 4>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_128x64_64x32x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// F32 accumulation, F32 output
//
/////////////////////////////////////////////////////////////////////////////////////////////////
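// The tests below additionally write the output in F32 (ElementOutput = float).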
TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_64x64_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = float;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_64x128_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 128, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = float;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_128x64_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = float;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_128x128_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 128, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = float;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_128x256_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 256, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = float;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_256x128_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<256, 128, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = float;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_64x64_32x32x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = float;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_128x64_64x32x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = float;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_64x128_32x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 128, 4>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = float;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
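// The remaining tests fix the epilogue vector length explicitly (8, 2, and 1 elements
// per access) instead of deriving it from a 128-bit access width.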
// This works
TEST(SM70_Epilogue_threadblock_epilogue, vec8_f16_f32_volta_tensor_op_64x64_32x32x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 8;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
// This works (vector length 2 per access)
TEST(SM70_Epilogue_threadblock_epilogue, vec2_f16_f32_volta_tensor_op_64x64_32x32x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 2;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// This fails (vector length 1 per access)
TEST(SM70_Epilogue_threadblock_epilogue, vec1_f16_f32_volta_tensor_op_64x64_32x32x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<64, 64, 4>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 1;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM70_Epilogue_threadblock_epilogue, vec1_f32_volta_tensor_op_128x128_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 128, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = float;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 1;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, vec1_f16_f32_volta_tensor_op_128x128_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 128, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 1;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
TEST(SM70_Epilogue_threadblock_epilogue, vec1_f16_f32_volta_tensor_op_128x256_64x64x4) {
//
// Define the warp-level matrix multiply
//
using Shape = cutlass::gemm::GemmShape<128, 256, 4>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>;
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = float;
using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>;
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = ElementC;
using ElementCompute = ElementC;
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::Mma<
cutlass::gemm::GemmShape<16, 16, 4>,
32,
ElementA,
cutlass::layout::ColumnMajor,
ElementB,
cutlass::layout::RowMajor,
ElementC,
cutlass::layout::RowMajor,
cutlass::arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1>
>;
using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
WarpShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
Policy
>;
int const kPartitionsK = 1;
int const kElementsPerAccess = 1;
using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
WarpShape,
kPartitionsK,
ElementC,
kElementsPerAccess,
ElementAccumulator>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kElementsPerAccess,
ElementAccumulator,
ElementCompute
>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/threadblock/epilogue_volta_tensor_op.cu/0 | {
"file_path": "test/unit/epilogue/threadblock/epilogue_volta_tensor_op.cu",
"repo_id": "test",
"token_count": 27577
} | 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMV interface
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include <type_traits>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/kernel/gemv.h"
#include "cutlass/gemm/device/gemv.h"
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "testbed_utils.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
template <typename Gemv>
class TestbedGemv {
public:
using ElementA = typename Gemv::ElementA;
using LayoutA = typename Gemv::LayoutA;
using ElementB = typename Gemv::ElementB;
using ElementC = typename Gemv::ElementC;
using ElementAccumulator = typename Gemv::ElementAccumulator;
using ElementCompute = typename Gemv::EpilogueOutputOp::ElementCompute;
using LayoutV = cutlass::layout::RowMajor;
private:
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutV> tensor_B;
cutlass::HostTensor<ElementC, LayoutV> tensor_C;
cutlass::HostTensor<ElementC, LayoutV> tensor_D;
cutlass::HostTensor<ElementC, LayoutV> reference_D;
public:
//
// Methods
//
TestbedGemv(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2023
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemv::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(
cutlass::MatrixCoord problem_size,
int32_t batch_count
) {
//
// Allocate the GEMV workspace
//
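// Batches are packed along A's strided dimension (extra columns for column-major A,
// extra rows for row-major A); B, C, and D stack each batch's vector into a single column.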
if(std::is_same<LayoutA, cutlass::layout::ColumnMajor>::value) {
tensor_A.resize({problem_size.row(), batch_count * problem_size.column()});
}
else {
tensor_A.resize({batch_count * problem_size.row(), problem_size.column()});
}
tensor_B.resize({batch_count * problem_size.column(), 1});
tensor_C.resize({batch_count * problem_size.row(), 1});
tensor_D.resize({batch_count * problem_size.row(), 1});
reference_D.resize({batch_count * problem_size.row(), 1}, false);
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 1));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 3));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename Gemv::ElementA(1);
tensor_B.host_view().at({0, 0}) = typename Gemv::ElementB(1);
tensor_C.host_view().at({0, 0}) = typename Gemv::ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
}
/// Compares computed reference with device reference and outputs to a file if incorrect
bool compare_reference(
cutlass::MatrixCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view());
EXPECT_TRUE(passed) << " mismatched reference";
if (!passed) {
std::ofstream file("testbed_universal_errors.txt");
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\nComputed =\n" << tensor_D.host_view();
}
return passed;
}
/// Verifies the result
bool verify(
cutlass::MatrixCoord problem_size,
int32_t batch_count,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
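// Check against the host GemmComplex reference, treating the batched GEMV as a batched
// GEMM with N = 1 (problem {M, 1, K}) and the same batch strides.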
cutlass::reference::host::GemmComplex<
typename Gemv::ElementA, typename Gemv::LayoutA,
typename Gemv::ElementB, LayoutV,
typename Gemv::ElementC, LayoutV,
ElementCompute, ElementAccumulator
>(
{problem_size.row(), 1, problem_size.column()},
alpha,
tensor_A.host_ref(),
Gemv::kTransformA,
tensor_B.host_ref(),
Gemv::kTransformB,
beta,
tensor_C.host_ref(),
reference_D.host_ref(),
ElementAccumulator(0),
batch_count,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D
);
return compare_reference(problem_size, alpha, beta);
}
/// Runs one problem size
bool run(
cutlass::MatrixCoord problem_size,
int32_t batch_count,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
ElementCompute alpha,
ElementCompute beta) {
this->initialize(problem_size, batch_count);
//
// Initialize the GEMV operator
//
typename Gemv::Arguments arguments{
problem_size,
batch_count,
{alpha, beta},
tensor_A.device_ref(),
tensor_B.device_data(),
tensor_C.device_data(),
tensor_D.device_data(),
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D
};
Gemv gemm_op;
cutlass::Status status = gemm_op.can_implement(arguments);
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
size_t workspace_size = Gemv::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
status = gemm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the GEMV
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(
problem_size,
batch_count,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D,
alpha,
beta);
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemv>
bool TestAllGemv() {
using ElementCompute = typename Gemv::EpilogueOutputOp::ElementCompute;
int Batch[] = {
1, 520, 1314
};
int M[] = {
1, 5, 16
};
int K[] = {
8, 128, 256
};
double Alpha[] = {
1, 1.25
};
double Beta[] = {
0, 1, 1.25
};
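// Sweep batch count, problem size, and epilogue scalars. Operands are densely packed,
// so the strides passed below are m * k for A, k for B, and m for C and D.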
for (int b : Batch) {
for (int m : M) {
for (int k : K) {
for (double alpha : Alpha) {
for (double beta : Beta) {
TestbedGemv<Gemv> testbed;
if (!testbed.run(
{m, k},
b,
m * k,
k,
m,
m,
ElementCompute(alpha),
ElementCompute(beta))) {
return false;
}
}
}
}
}
}
return true;
}
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM50_Device_Gemv_f16n_f16_f16_simt_f32, RowMajorA) {
using ElementInput = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using ElementAccumulator = float;
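// Eight half_t elements per access = one 128-bit load/store (the f32 and f64 tests below use 4 and 2).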
int const kElementsPerAccess = 8;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
1,
ElementAccumulator,
ElementAccumulator>;
using Gemv = cutlass::gemm::device::Gemv<
cutlass::gemm::kernel::Gemv<
ElementInput, // Element A
LayoutA, // Layout A
ElementInput, // Element B
ElementOutput, // Element C
ElementAccumulator, // Element accumulator
EpilogueOp, // Output operator
kElementsPerAccess // Element access granularity
>
>;
EXPECT_TRUE(test::gemm::TestAllGemv<Gemv>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM50_Device_Gemv_f32n_f32_f32_simt_f32, RowMajorA) {
using ElementInput = float;
using ElementOutput = float;
using LayoutA = cutlass::layout::RowMajor;
using ElementAccumulator = float;
int const kElementsPerAccess = 4;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
1,
ElementAccumulator,
ElementAccumulator>;
using Gemv = cutlass::gemm::device::Gemv<
cutlass::gemm::kernel::Gemv<
ElementInput, // Element A
LayoutA, // Layout A
ElementInput, // Element B
ElementOutput, // Element C
ElementAccumulator, // Element accumulator
EpilogueOp, // Output operator
kElementsPerAccess // Element access granularity
>
>;
EXPECT_TRUE(test::gemm::TestAllGemv<Gemv>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM50_Device_Gemv_f64n_f64_f64_simt_f64, RowMajorA) {
using ElementInput = double;
using ElementOutput = double;
using LayoutA = cutlass::layout::RowMajor;
using ElementAccumulator = double;
int const kElementsPerAccess = 2;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
1,
ElementAccumulator,
ElementAccumulator>;
using Gemv = cutlass::gemm::device::Gemv<
cutlass::gemm::kernel::Gemv<
ElementInput, // Element A
LayoutA, // Layout A
ElementInput, // Element B
ElementOutput, // Element C
ElementAccumulator, // Element accumulator
EpilogueOp, // Output operator
kElementsPerAccess // Element access granularity
>
>;
EXPECT_TRUE(test::gemm::TestAllGemv<Gemv>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM50_Device_Gemv_f16n_f16_f16_simt_f32, ColumnMajorA) {
using ElementInput = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementAccumulator = float;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
1,
ElementAccumulator,
ElementAccumulator>;
using Gemv = cutlass::gemm::device::Gemv<
cutlass::gemm::kernel::Gemv<
ElementInput, // Element A
LayoutA, // Layout A
ElementInput, // Element B
ElementOutput, // Element C
ElementAccumulator, // Element Accumulator
EpilogueOp // Output operator
>
>;
EXPECT_TRUE(test::gemm::TestAllGemv<Gemv>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM50_Device_Gemv_f32n_f32_f32_simt_f32, ColumnMajorA) {
using ElementInput = float;
using ElementOutput = float;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementAccumulator = float;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
1,
ElementAccumulator,
ElementAccumulator>;
using Gemv = cutlass::gemm::device::Gemv<
cutlass::gemm::kernel::Gemv<
ElementInput, // Element A
LayoutA, // Layout A
ElementInput, // Element B
ElementOutput, // Element C
ElementAccumulator, // Element Accumulator
EpilogueOp // Output operator
>
>;
EXPECT_TRUE(test::gemm::TestAllGemv<Gemv>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM50_Device_Gemv_f64n_f64_f64_simt_f64, ColumnMajorA) {
using ElementInput = double;
using ElementOutput = double;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementAccumulator = double;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
1,
ElementAccumulator,
ElementAccumulator>;
using Gemv = cutlass::gemm::device::Gemv<
cutlass::gemm::kernel::Gemv<
ElementInput, // Element A
LayoutA, // Layout A
ElementInput, // Element B
ElementOutput, // Element C
ElementAccumulator, // Element Accumulator
EpilogueOp // Output operator
>
>;
EXPECT_TRUE(test::gemm::TestAllGemv<Gemv>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/gemv.cu/0 | {
"file_path": "test/unit/gemm/device/gemv.cu",
"repo_id": "test",
"token_count": 7013
} | 54 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide Ptr-Array GEMM interface
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/kernel/tile_scheduler.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "../../common/cutlass_unit_test.h"
#include "gemm_testbed_3x_ptr_array.hpp"
#if defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
using namespace cute;
TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_group_gemm, 128x128x64_2x2x1) {
// A matrix configuration
using ElementA = cutlass::half_t; // Element type for A matrix operand
using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand
constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes)
// B matrix configuration
using ElementB = cutlass::half_t; // Element type for B matrix operand
using LayoutB = cutlass::layout::ColumnMajor; // Layout type for B matrix operand
constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes)
// C/D matrix configuration
using ElementC = cutlass::half_t; // Element type for C and D matrix operands
using LayoutC = cutlass::layout::ColumnMajor; // Layout type for C and D matrix operands
constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Memory access granularity/alignment of C matrix in units of elements (up to 16 bytes)
// Core kernel configurations
using ElementAccumulator = float; // Element type for internal accumulation
using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that supports the intended feature
using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag
using TileShape = Shape<_256,_128,_64>; // Threadblock-level tile size
using ClusterShape = Shape<_2,_2,_1>; // Shape of the threadblocks in a cluster
using StageCountType = cutlass::gemm::collective::StageCountAuto; // Stage count maximized based on the tile size
using KernelSchedule = cutlass::gemm::KernelPtrArrayTmaWarpSpecializedCooperative; // Kernel to launch
using EpilogueSchedule = cutlass::epilogue::PtrArrayNoSmemWarpSpecialized; // Epilogue to launch
using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
TileShape, ClusterShape,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator, ElementAccumulator,
ElementC, LayoutC *, AlignmentC,
ElementC, LayoutC *, AlignmentC,
EpilogueSchedule
>::CollectiveOp;
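// The mainloop stage count is sized automatically after carving the epilogue's shared
// memory footprint out of the shared memory budget (StageCountAutoCarveout below).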
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
ArchTag, OperatorClass,
ElementA, LayoutA *, AlignmentA,
ElementB, LayoutB *, AlignmentB,
ElementAccumulator,
TileShape, ClusterShape,
cutlass::gemm::collective::StageCountAutoCarveout<
static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
KernelSchedule
>::CollectiveOp;
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
cutlass::gemm::GroupProblemShape<Shape<int,int,int>>,
CollectiveMainloop,
CollectiveEpilogue
>;
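// GroupProblemShape together with the pointer-to-layout (LayoutA *, LayoutB *, LayoutC *)
// arguments marks this as a grouped GEMM: each group supplies its own problem shape and strides.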
using namespace test::gemm::device;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
bool result = TestAll<Gemm>(1.0, 1.0);
EXPECT_TRUE(result);
}
#endif // defined(CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED)
| test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_group_gemm.cu/0 | {
"file_path": "test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_group_gemm.cu",
"repo_id": "test",
"token_count": 2230
} | 55 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for grouped GEMM problem visitors
*/
#pragma once
#include <iostream>
#include <algorithm>
#include <numeric>
#include <vector>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h"
#include "cutlass/gemm/kernel/grouped_problem_visitor.h"
#include "cutlass/util/device_memory.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Use simple problem visitor as a baseline
template <typename ProblemSizeHelper,
typename ThreadblockShape,
int PrefetchTileCount,
int ThreadCount>
struct BaselineProblemVisitor : public cutlass::gemm::kernel::BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape> {
using Base = cutlass::gemm::kernel::BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape>;
using Params = typename Base::Params;
static int const kThreadCount = ThreadCount;
struct SharedStorage {};
int32_t tile_count_sum;
SharedStorage &shared_storage;
//
// Methods
//
CUTLASS_DEVICE
BaselineProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base(params_, block_idx),
shared_storage(shared_storage_)
{
cutlass::gemm::GemmCoord problem = this->problem_size();
cutlass::gemm::GemmCoord grid = this->grid_shape(problem);
tile_count_sum = this->tile_count(grid);
}
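// Advance through problems in order, accumulating tile counts until the running prefix
// sum covers the current tile_idx.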
CUTLASS_DEVICE
bool next_tile() {
if (this->tile_idx < tile_count_sum) {
return true;
}
do {
++this->problem_idx;
if (this->problem_idx >= this->params.problem_count) {
return false;
}
cutlass::gemm::GemmCoord problem = this->problem_size();
cutlass::gemm::GemmCoord grid = this->grid_shape(problem);
this->problem_tile_start = tile_count_sum;
tile_count_sum += this->tile_count(grid);
} while (tile_count_sum <= this->tile_idx);
return true;
}
static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count) {
return 0;
}
static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count,
void* host_workspace_ptr) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename ProblemVisitor>
struct ProblemVisitorKernel {
struct SharedStorage {
typename ProblemVisitor::SharedStorage problem_visitor;
};
struct Params {
typename ProblemVisitor::Params problem_visitor_params;
int32_t* visited_problems_ptr;
int32_t* visited_tiles_ptr;
int32_t visits_per_block;
Params():
visited_problems_ptr(nullptr),
visited_tiles_ptr(nullptr),
visits_per_block(0) {}
Params(typename ProblemVisitor::Params problem_visitor_params_,
int32_t* visited_problems_ptr_,
int32_t* visited_tiles_ptr_,
int32_t visits_per_block_):
problem_visitor_params(problem_visitor_params_),
visited_problems_ptr(visited_problems_ptr_),
visited_tiles_ptr(visited_tiles_ptr_),
visits_per_block(visits_per_block_) {}
};
CUTLASS_DEVICE
void operator()(const Params& params, SharedStorage &shared_storage) {
int32_t store_offset = params.visits_per_block * blockIdx.x;
ProblemVisitor problem_visitor(params.problem_visitor_params,
shared_storage.problem_visitor,
blockIdx.x);
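// Thread 0 of each block records every (problem, tile) pair it visits into a per-block
// slice of the output arrays so the host can verify complete, non-overlapping coverage.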
while (problem_visitor.next_tile()) {
int32_t problem_idx = problem_visitor.problem_index();
int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
if (threadIdx.x == 0) {
params.visited_problems_ptr[store_offset] = problem_idx;
params.visited_tiles_ptr[store_offset] = threadblock_idx;
++store_offset;
}
problem_visitor.advance(gridDim.x);
}
}
};
template <typename ProblemVisitor>
struct ProblemVisitorRunner {
using BaseKernel = ProblemVisitorKernel<ProblemVisitor>;
using Params = typename BaseKernel::Params;
Params params;
std::vector<cutlass::gemm::GemmCoord> host_problem_sizes;
int32_t problem_count;
int32_t threadblock_count;
int32_t visits_per_block;
cutlass::DeviceAllocation<int32_t> visited_problems;
cutlass::DeviceAllocation<int32_t> visited_tiles;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> device_problem_sizes;
cutlass::DeviceAllocation<uint8_t> workspace;
std::vector<int32_t> host_visited_problems;
std::vector<int32_t> host_visited_tiles;
ProblemVisitorRunner(const std::vector<cutlass::gemm::GemmCoord>& host_problem_sizes_,
int32_t threadblock_count_):
host_problem_sizes(host_problem_sizes_),
problem_count(int32_t(host_problem_sizes_.size())),
threadblock_count(threadblock_count_) {}
/// Initializes GEMM state from arguments.
cutlass::Status initialize() {
size_t workspace_bytes = ProblemVisitor::get_workspace_size(
host_problem_sizes.data(),
problem_count,
threadblock_count);
workspace.reset(workspace_bytes);
std::vector<uint8_t> host_workspace(workspace_bytes);
int32_t tile_count = ProblemVisitor::group_tile_count(host_problem_sizes.data(), problem_count);
ProblemVisitor::host_precompute(host_problem_sizes.data(), problem_count,
threadblock_count, host_workspace.data());
workspace.copy_from_host(host_workspace.data(), workspace_bytes);
device_problem_sizes.reset(problem_count);
device_problem_sizes.copy_from_host(host_problem_sizes.data(), problem_count);
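// Each block records at most ceil(tile_count / threadblock_count) visits; slots left at -1 were never visited.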
visits_per_block = (tile_count - 1 + threadblock_count) / threadblock_count;
int32_t total_visits = visits_per_block * threadblock_count;
visited_problems.reset(total_visits);
visited_tiles.reset(total_visits);
host_visited_problems.resize(total_visits);
host_visited_tiles.resize(total_visits);
cudaError_t result = cudaMemset(visited_problems.get(), -1, sizeof(int32_t) * total_visits);
if (result != cudaSuccess) {
return cutlass::Status::kErrorInternal;
}
result = cudaMemset(visited_tiles.get(), -1, sizeof(int32_t) * total_visits);
if (result != cudaSuccess) {
return cutlass::Status::kErrorInternal;
}
typename ProblemVisitor::Params pv_params(device_problem_sizes.get(), problem_count, workspace.get(), tile_count);
params = Params(pv_params, visited_problems.get(), visited_tiles.get(), visits_per_block);
return cutlass::Status::kSuccess;
}
bool verify() {
// Sort visits by problem index, then by tile (threadblock) index within each problem
std::vector<int32_t> indices(host_visited_problems.size());
std::iota(indices.begin(), indices.end(), 0);
std::stable_sort(indices.begin(), indices.end(),
[&](int32_t i1, int32_t i2) {
if (host_visited_problems[i1] == host_visited_problems[i2]) {
return host_visited_tiles[i1] < host_visited_tiles[i2];
}
return host_visited_problems[i1] < host_visited_problems[i2];
});
int32_t idx = 0;
// Skip any entries that were not visited
while (idx < static_cast<int32_t>(indices.size()) && host_visited_problems[indices[idx]] == -1) {
++idx;
}
// Check that each problem visited has the tiles we expect
for (int32_t problem_idx = 0; problem_idx < problem_count; ++problem_idx) {
auto problem = host_problem_sizes[problem_idx];
ProblemVisitor::possibly_transpose_problem(problem);
int32_t problem_tiles = ProblemVisitor::tile_count(ProblemVisitor::grid_shape(problem));
for (int i = 0; i < problem_tiles; ++i) {
EXPECT_EQ(problem_idx, host_visited_problems[indices[idx]]);
EXPECT_EQ(i, host_visited_tiles[indices[idx]]);
++idx;
}
}
return true;
}
bool run(cudaStream_t stream = nullptr) {
cutlass::Status status = initialize();
if (status != cutlass::Status::kSuccess) {
std::cerr << "Initialization failed" << std::endl;
return false;
}
dim3 grid(threadblock_count, 1, 1);
dim3 block(ProblemVisitor::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename BaseKernel::SharedStorage));
cutlass::Kernel<BaseKernel><<<grid, block, smem_size, stream>>>(params);
cudaError_t result = cudaGetLastError();
if (result != cudaSuccess) {
std::cerr << "grid launch failed with error " << cudaGetErrorString(result) << std::endl;
return false;
}
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "cudaDeviceSynchronize failed with error " << cudaGetErrorString(result) << std::endl;
return false;
}
visited_problems.copy_to_host(host_visited_problems.data());
visited_tiles.copy_to_host(host_visited_tiles.data());
return verify();
}
};
template <typename ThreadblockShape,
int PrefetchTileCount,
int ThreadCount,
bool Transpose,
cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode0,
cutlass::gemm::kernel::GroupScheduleMode... Args>
struct TestbedGroupedGemmScheduler {
using PSHelper = cutlass::gemm::kernel::detail::GemmGroupedProblemSizeHelper<ThreadblockShape, Transpose>;
using BaselinePV = BaselineProblemVisitor<PSHelper,
ThreadblockShape,
PrefetchTileCount,
ThreadCount>;
//
// Data members
//
uint32_t seed;
int problem_count;
int threadblock_count;
std::vector<cutlass::gemm::GemmCoord> problem_sizes_host;
//
// Methods
//
TestbedGroupedGemmScheduler(uint32_t seed_ = 3080):
seed(seed_) { srand(seed); }
/// Initializes data structures
void initialize(int32_t scale_factor) {
//
// Choose random problem sizes
//
problem_sizes_host.clear();
problem_sizes_host.resize(problem_count);
for (int32_t i = 0; i < problem_count; ++i) {
cutlass::gemm::GemmCoord problem(
scale_factor * (rand() % 64) + 24,
scale_factor * (rand() % 64) + 24,
scale_factor * (rand() % 64) + 24);
problem_sizes_host.at(i) = problem;
}
}
template <cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode_>
void compare_visitors(const ProblemVisitorRunner<BaselinePV>& baseline_runner) {
using PV = cutlass::gemm::kernel::GemmGroupedProblemVisitor<
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount,
Transpose>;
ProblemVisitorRunner<PV> runner(problem_sizes_host, threadblock_count);
EXPECT_TRUE(runner.run());
// Check that this problem visitor visits the same problems and tiles as the baseline
EXPECT_EQ(baseline_runner.host_visited_problems, runner.host_visited_problems);
EXPECT_EQ(baseline_runner.host_visited_tiles, runner.host_visited_tiles);
}
template <cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode1_,
cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode2_,
cutlass::gemm::kernel::GroupScheduleMode... Rest>
void compare_visitors(const ProblemVisitorRunner<BaselinePV>& baseline_runner) {
// Compare the next visitor with the baseline visitor
compare_visitors<GroupScheduleMode1_>(baseline_runner);
// Recurse to compare the next visitors
compare_visitors<GroupScheduleMode2_, Rest...>(baseline_runner);
}
/// Executes the test on all scheduler modes
void run(int problem_count, int threadblock_count, int scale_factor=8) {
this->problem_count = problem_count;
this->threadblock_count = threadblock_count;
// Initialize the problem
initialize(scale_factor);
// Run the baseline visitor to which we will compare all other visitors
ProblemVisitorRunner<BaselinePV> baseline_runner(problem_sizes_host, threadblock_count);
EXPECT_TRUE(baseline_runner.run());
compare_visitors<Args...>(baseline_runner);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // device
} // gemm
} // test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_grouped_scheduler.h/0 | {
"file_path": "test/unit/gemm/device/testbed_grouped_scheduler.h",
"repo_id": "test",
"token_count": 5660
} | 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "testbed_gemv.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
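// Test names encode, roughly: problem size m x n x k x batch, threadblock shape x batch tile size,
// thread shape x batch tile size, the A/B/C layouts (r = row-major, c = column-major), and the
// input/output element types.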
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcr_alpha_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size, -0.5f);
}
TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcr_alpha_beta_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size, 4.5f, -0.5f);
}
TEST(SM50_batched_gemv, 1x64x24x4096_1x8x4x64_1x1x4x64_rcr_alpha_beta_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 24, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size, cutlass::half_t(4.5f), cutlass::half_t(-0.5f));
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
kBatchTileSize>(problem_size);
}
/////////////
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_crc_alpha_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size, -0.5f);
}
TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_crc_alpha_beta_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size, 4.5f, -0.5f);
}
TEST(SM50_batched_gemv, 1x64x24x4096_1x8x4x64_1x1x4x64_crc_alpha_beta_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 24, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::ColumnMajor,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size, cutlass::half_t(4.5f), cutlass::half_t(-0.5f));
}
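// The _alpha and _alpha_beta variants above pass explicit epilogue scalars to the test helper;
// the remaining tests rely on the helper's defaults (presumably alpha = 1 and beta = 0).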
/////////////
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_fp16_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
///
TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 1;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 2;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>;
static int const kBatchTileSize = 8;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_i8_i32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
int8_t, int32_t, int32_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size);
}
TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcc_alpha_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size, -0.5f);
}
TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcc_alpha_beta_fp32_fp32)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
float, float, float,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size, 4.5f, -0.5f);
}
TEST(SM50_batched_gemv, 1x64x24x4096_1x8x4x64_1x1x4x64_rcc_alpha_beta_fp16_fp16)
{
cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 24, 4096);
using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>;
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>;
static int const kBatchTileSize = 64;
test::gemm::kernel::batched_gemv_kernel_test<
ThreadBlockShape,
ThreadShape,
cutlass::half_t, float, cutlass::half_t,
cutlass::layout::RowMajor,
cutlass::layout::ColumnMajor,
cutlass::layout::ColumnMajor,
kBatchTileSize>(problem_size, cutlass::half_t(4.5f), cutlass::half_t(-0.5f));
}
| test/unit/gemm/kernel/batched_gemv.cu/0 | {
"file_path": "test/unit/gemm/kernel/batched_gemv.cu",
"repo_id": "test",
"token_count": 27111
} | 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit testbed for kernel-level GEMM
*/
#pragma once
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/core_io.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/host_uncompress.h"
namespace test {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <typename Mma>
__global__ void kernel_multistage_mma_sparse(cutlass::gemm::GemmCoord problem_size,
typename Mma::IteratorA::Params params_A,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::Params params_B,
typename Mma::IteratorB::TensorRef ref_B,
typename Mma::ElementC *ptr_C,
typename Mma::LayoutC::Stride::Index ldc,
typename Mma::IteratorE::Params params_E,
typename Mma::IteratorE::TensorRef ref_E) {
  // Shared storage needed by threadblock-scoped matrix multiply-accumulate
// Dynamic shared memory base pointer
extern __shared__ int GemmSharedStorageBase[];
// Declare pointer to dynamic shared memory.
typename Mma::SharedStorage *shared_storage =
reinterpret_cast<typename Mma::SharedStorage *>(GemmSharedStorageBase);
// Compute threadblock location
cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y),
0};
cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.k() / Mma::kSparse};
cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(),
tb_tile_offset.n() * Mma::Shape::kN};
cutlass::MatrixCoord tb_offset_E{tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.k() / Mma::kSparse};
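  // Operand A and its metadata E are stored compressed along K, so their threadblock K offsets
  // are scaled by 1/kSparse; operand B remains dense.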
// Compute position within threadblock
int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(params_A, ref_A.data(),
{problem_size.m(), problem_size.k() / Mma::kSparse},
tb_thread_id, tb_offset_A);
typename Mma::IteratorB iterator_B(params_B, ref_B.data(),
{problem_size.k(), problem_size.n()},
tb_thread_id, tb_offset_B);
typename Mma::IteratorE iterator_E(
params_E, ref_E.data(),
{problem_size.m(),
problem_size.k() / Mma::kSparse / Mma::kElementsPerElementE},
tb_thread_id, tb_offset_E);
int warp_id = __shfl_sync(0xffffffff, threadIdx.y, 0);
// Construct thread-scoped matrix multiply
Mma mma(*shared_storage, tb_thread_id, warp_id, threadIdx.x);
typename Mma::FragmentC accum;
accum.clear();
int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accum, iterator_A, iterator_B, iterator_E, accum);
// Output results
typename Mma::Operator::IteratorC iterator_C({ptr_C, ldc}, threadIdx.x);
iterator_C.add_tile_offset(
{(tb_tile_offset.m() * Mma::WarpCount::kM) +
(warp_id % Mma::WarpCount::kM),
(tb_tile_offset.n() * Mma::WarpCount::kN) +
(warp_id / Mma::WarpCount::kM)});
iterator_C.store(accum);
}
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Threadblock-level matrix multiply-accumulate
typename MmaCore_>
struct SparseTestbed {
/// Threadblock-level GEMM implementation
using MmaCore = MmaCore_;
using ThreadblockShape = typename MmaCore::Shape;
using WarpShape = typename MmaCore::WarpShape;
using InstructionShape = typename MmaCore::InstructionShape;
using ElementA = typename MmaCore::ElementA;
using LayoutA = typename MmaCore::LayoutA;
using ElementB = typename MmaCore::ElementB;
using LayoutB = typename MmaCore::LayoutB;
using ElementC = typename MmaCore::ElementC;
using LayoutC = typename MmaCore::LayoutC;
using ElementE = typename MmaCore::ElementE;
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using ThreadMapE = typename MmaCore::IteratorThreadMapE;
using AccessTypeA = cutlass::Array<ElementA, ThreadMapA::kElementsPerAccess>;
using AccessTypeB = cutlass::Array<ElementB, ThreadMapB::kElementsPerAccess>;
using AccessTypeE = cutlass::Array<ElementE, ThreadMapE::kElementsPerAccess>;
static int const Stages = MmaCore::kStages;
static cutlass::arch::CacheOperation::Kind const CacheOpA =
MmaCore::kCacheOpA;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
MmaCore::kCacheOpB;
static cutlass::arch::CacheOperation::Kind const CacheOpE =
MmaCore::kCacheOpE;
static int const Sparse = MmaCore::kSparse;
static int const MetaSizeInBits = MmaCore::kMetaSizeInBits;
static int const MaxID2 = MmaCore::kMaxID2;
using LayoutE = cutlass::layout::RowMajor;
using ReorderedLayoutE = typename MmaCore::GmemLayoutE;
static int const ElementsPerElementE = MmaCore::kElementsPerElementE;
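  // Interpretation of the MmaCore constants above (actual values depend on the instantiation):
  // for 2:4 structured sparsity kSparse == 2, so A is stored compressed as (M x K/2) and the
  // metadata tensor E records which elements of each group were kept. kElementsPerElementE is
  // the number of logical metadata entries packed into a single ElementE word.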
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK / Sparse>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;
// Define iterators over tiles from the E operand
using IteratorE =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK /
Sparse /
ElementsPerElementE>,
ElementE, ReorderedLayoutE, 1, ThreadMapE, AccessTypeE>;
// Define the threadblock-scoped pipelined matrix multiply
using Mma = cutlass::gemm::threadblock::SparseMmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
CacheOpA, IteratorB, typename MmaCore::SmemIteratorB, CacheOpB, ElementC,
LayoutC, IteratorE, typename MmaCore::SmemIteratorE, CacheOpE,
typename MmaCore::MmaPolicy, Stages>;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> matrix_A;
cutlass::HostTensor<ElementA, LayoutA> matrix_A_uncompressed;
cutlass::HostTensor<ElementB, LayoutB> matrix_B;
cutlass::HostTensor<ElementC, LayoutC> matrix_C_computed;
cutlass::HostTensor<ElementC, LayoutC> matrix_C_reference;
cutlass::HostTensor<ElementE, LayoutE> matrix_E;
cutlass::HostTensor<ElementE, ReorderedLayoutE> matrix_E_reordered;
cutlass::gemm::GemmCoord problem_size;
float alpha, beta;
//
// Methods
//
/// Allocates workspace in device memory
SparseTestbed(int m, int n, int k, float alpha_ = float(1), float beta_ = float(0))
: problem_size(m, n, k), alpha(alpha_), beta(beta_) {
matrix_A.reset(cutlass::make_Coord(m, k / Sparse));
matrix_A_uncompressed.reset(cutlass::make_Coord(m, k));
matrix_B.reset(cutlass::make_Coord(k, n));
matrix_C_computed.reset(cutlass::make_Coord(m, n));
matrix_C_reference.reset(cutlass::make_Coord(m, n), false);
matrix_E.reset(cutlass::make_Coord(m, k / Sparse / ElementsPerElementE));
matrix_E_reordered.reset(
cutlass::make_Coord(m, k / Sparse / ElementsPerElementE));
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
    if (properties.sharedMemPerBlockOptin < sizeof(typename Mma::SharedStorage)) {
      return false;
    }
    return true;
}
/// Runs the test
bool run(
dim3 grid, dim3 block,
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_E = cutlass::Distribution::Uniform) {
// Waive the test
if (!sufficient()) {
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_A.host_view(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_A.host_data(),
matrix_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(matrix_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_B.host_view(), seed + 16, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_B.host_data(),
matrix_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(matrix_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(matrix_C_computed.host_view());
cutlass::reference::host::TensorFill(matrix_C_reference.host_view());
if (init_E == cutlass::Distribution::Uniform) {
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomSparseMeta(
matrix_E.host_view(), seed, MetaSizeInBits);
} else if (init_E == cutlass::Distribution::Identity) {
uint32_t content = (MaxID2 == 1) ? 0x44444444 : 0x4444;
cutlass::reference::host::TensorFill(matrix_E.host_view(),
(ElementE)(content));
} else {
return false;
}
cutlass::reorder_meta(matrix_E_reordered.host_ref(), matrix_E.host_ref(),
{problem_size.m(), problem_size.n(),
problem_size.k() / Sparse / ElementsPerElementE});
matrix_A.sync_device();
matrix_B.sync_device();
matrix_C_computed.sync_device();
matrix_E_reordered.sync_device();
typename IteratorA::Params params_A(matrix_A.layout());
typename IteratorB::Params params_B(matrix_B.layout());
typename IteratorE::Params params_E(matrix_E_reordered.layout());
cudaError_t result;
int smem_size = int(sizeof(typename Mma::SharedStorage));
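    // Kernels that request more than 48 KB of dynamic shared memory must opt in explicitly
    // via cudaFuncSetAttribute before launch.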
if (smem_size >= (48 << 10)) {
result = cudaFuncSetAttribute(
test::gemm::threadblock::kernel_multistage_mma_sparse<Mma>,
cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);
if (result != cudaSuccess) {
return true;
}
result = cudaFuncSetAttribute(
test::gemm::threadblock::kernel_multistage_mma_sparse<Mma>,
cudaFuncAttributePreferredSharedMemoryCarveout, 100);
if (result != cudaSuccess) {
return true;
}
}
test::gemm::threadblock::kernel_multistage_mma_sparse<Mma>
<<<grid, block, smem_size, 0>>>(
problem_size, params_A, matrix_A.device_ref(), params_B,
matrix_B.device_ref(), matrix_C_computed.device_data(),
matrix_C_computed.layout().stride(0), params_E,
matrix_E_reordered.device_ref());
//
// Check error code
//
result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess)
<< " kernel error: " << cudaGetErrorString(result);
matrix_C_computed.sync_host();
cutlass::uncompress(matrix_A_uncompressed.host_ref(), matrix_A.host_ref(),
matrix_E.host_ref(), problem_size.m(),
problem_size.k());
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC>
reference_gemm;
reference_gemm(problem_size, ElementC(alpha),
matrix_A_uncompressed.host_view(), matrix_B.host_view(),
ElementC(beta), matrix_C_reference.host_view());
bool passed = cutlass::reference::host::TensorEquals(
matrix_C_computed.host_view(), matrix_C_reference.host_view());
EXPECT_TRUE(passed);
if (!passed && CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cout
<< __FILE__ << ":" << __LINE__ << " "
<< "A:\n" << matrix_A.host_view() << "\n"
<< "B:\n" << matrix_B.host_view() << "\n"
<< "E:\n" << matrix_E.host_view() << "\n"
<< "Reference:\n"
<< matrix_C_reference.host_view() << "\n"
<< "Computed:\n"
<< matrix_C_computed.host_view() << "\n";
}
EXPECT_GT(cutlass::reference::host::TensorNorm(matrix_C_reference.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(matrix_C_computed.host_view()), 0);
return passed;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace test
| test/unit/gemm/threadblock/mma_multistage_sparse_testbed.h/0 | {
"file_path": "test/unit/gemm/threadblock/mma_multistage_sparse_testbed.h",
"repo_id": "test",
"token_count": 6933
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#pragma once
#include <iostream>
#include <cstdio>
#include <vector>
#include "cutlass/gemm/thread/mma.h"
#include "../kernel/thread/testbed_kernel.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/trace.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include <cuda.h>
#include <nvrtc.h>
#include "../cutlass/nvrtc/environment.h"
#include <assert.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace nvrtc {
namespace thread {
#define NVRTC_RETURN_IF_ERROR(api) \
do { \
nvrtcResult _result = api; \
if (_result != NVRTC_SUCCESS) { \
CUTLASS_TRACE_HOST("Nvrtc error: " << _result); \
return false; \
} \
} while(0)
inline const char * cuda_source_fmt = R"""(
#include "kernel/thread/contraction.hpp"
using Operator = %s;
extern "C" __global__ void global_entry(__grid_constant__ Operator::Params const params) {
extern __shared__ char smem[];
Operator op;
op(params, smem);
}
)""";
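// The %s placeholder above is expanded (via std::snprintf below) with the fully qualified
// operator type, producing a self-contained translation unit that NVRTC compiles.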
struct TestbedKernel {
static bool compile(std::string const &kernel, std::vector<const char *> const &opts) {
int sz = std::snprintf(nullptr, 0, cuda_source_fmt, kernel.c_str());
std::vector<char> cuda_source(sz + 1);
std::snprintf(&cuda_source[0], cuda_source.size(), cuda_source_fmt, kernel.c_str());
nvrtcProgram program;
NVRTC_RETURN_IF_ERROR(
nvrtcCreateProgram(
&program,
cuda_source.data(),
nullptr,
static_cast<int32_t>(cutlass::nvrtc::kCutlassHeaderCount),
cutlass::nvrtc::kCutlassHeaders,
cutlass::nvrtc::kCutlassHeaderNames)
);
nvrtcResult compile_result =
nvrtcCompileProgram(
program,
static_cast<int32_t>(opts.size()),
opts.data());
size_t log_size;
NVRTC_RETURN_IF_ERROR(
nvrtcGetProgramLogSize(program, &log_size)
);
if (log_size > 1) {
auto log = std::make_unique<char[]>(log_size);
NVRTC_RETURN_IF_ERROR(
nvrtcGetProgramLog(program, log.get())
);
std::cout << log.get() << std::endl;
}
NVRTC_RETURN_IF_ERROR(compile_result);
NVRTC_RETURN_IF_ERROR(
nvrtcDestroyProgram(&program)
);
return true;
}
};
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC
>
struct Testbed {
/// Thread-level matrix multiply-accumulate operator
using Mma = cutlass::gemm::thread::Mma<
Shape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC
>;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
//
// Methods
//
/// Allocates workspace in device memory
Testbed() {
tensor_A.reset(cutlass::make_Coord(Shape::kM, Shape::kK));
tensor_B.reset(cutlass::make_Coord(Shape::kK, Shape::kN));
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN));
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
}
static inline bool check_nvrtc_error(nvrtcResult error) {
if (error != NVRTC_SUCCESS) {
      std::cerr << "failed to compile: " << nvrtcGetErrorString(error) << std::endl;
return false;
}
return true;
}
/// Runs the test
bool run(std::string const &gemm_traits) {
//
// initialize device memory
//
cutlass::reference::host::BlockFillSequential(
tensor_A.host_data(),
tensor_A.capacity()
);
cutlass::reference::host::BlockFillSequential(
tensor_B.host_data(),
tensor_B.capacity(),
ElementB(1),
ElementB(2)
);
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D_computed.sync_device();
#if 0
// launch kernel
cutlass::gemm::kernel::testbed_kernel<Mma><<< dim3(1, 1), dim3(1, 1, 1) >>>(
tensor_D_computed.device_data(),
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data());
#else
// Instantiate gemm_kernel
nvrtcResult result_nvrtc;
nvrtcProgram program;
static char const *src =
"#include \"cutlass/gemm/thread/mma.h\"\n"
"#include \"cutlass/gemm/gemm.h\"\n"
"#include \"cutlass/layout/matrix.h\"\n"
"#include \"unit/nvrtc/kernel/thread/testbed_kernel.h\"\n"
;
std::string type_name;
#if 0
// TODO Ideally we'd use nvrtcGetTypeName to determine the type, but it cannot resolve enum symbol names
// As an alternative, we might want to implement to_string<GemmTraits>() to get the traits string.
nvrtcGetTypeName<typename GemmTraits_>(&type_name);
#else
type_name = gemm_traits;
#endif
result_nvrtc = nvrtcCreateProgram(&program,
src,
NULL,
(int)cutlass::nvrtc::kCutlassHeaderCount,
cutlass::nvrtc::kCutlassHeaders,
cutlass::nvrtc::kCutlassHeaderNames);
check_nvrtc_error(result_nvrtc);
std::string gemm_kernel_instantiation =
"test::nvrtc::kernel::thread::testbed_kernel< " + type_name + " >";
nvrtcAddNameExpression(program, gemm_kernel_instantiation.c_str());
const char *opts[] = {"--gpu-architecture=compute_75",
"--std=c++17",
"--include-path=/usr/local/cuda-10.1/include"};
result_nvrtc = nvrtcCompileProgram(program, 3, opts);
if (result_nvrtc != NVRTC_SUCCESS) {
size_t logSize;
nvrtcGetProgramLogSize(program, &logSize);
std::vector<char> log(logSize);
nvrtcGetProgramLog(program, log.data());
std::cout << "Compile log:" << std::endl << log.data() << std::endl;
}
if (!check_nvrtc_error(result_nvrtc)) {
assert(0);
}
// The lowered name is the name of the template instantiation in the generated PTX code.
char const *gemm_kernel_lowered_name;
  result_nvrtc = nvrtcGetLoweredName(program, gemm_kernel_instantiation.c_str(), &gemm_kernel_lowered_name);
if (!check_nvrtc_error(result_nvrtc)) {
assert(0);
}
  // Query the size of the generated PTX so that we can allocate storage and retrieve it afterwards
size_t ptx_size;
result_nvrtc = nvrtcGetPTXSize(program, &ptx_size);
if (!check_nvrtc_error(result_nvrtc)) {
assert(0);
}
std::vector<char> ptx(ptx_size);
result_nvrtc = nvrtcGetPTX(program, ptx.data());
if (!check_nvrtc_error(result_nvrtc)) {
assert(0);
}
// we do not need the nvrtc program anymore
//nvrtcDestroyProgram(&program);
CUmodule module;
CUresult result_cuda;
result_cuda = cuModuleLoadDataEx(&module, ptx.data(), 0, 0, 0);
if (result_cuda != CUDA_SUCCESS) {
assert(0);
}
CUfunction kernel;
result_cuda = cuModuleGetFunction(&kernel, module, gemm_kernel_lowered_name);
if (result_cuda != CUDA_SUCCESS) {
assert(0);
}
void* d_a = (void*)tensor_A.device_data();
void* d_b = (void*)tensor_B.device_data();
void* d_c = (void*)tensor_C.device_data();
void* d_d = (void*)tensor_D_computed.device_data();
void* args[] = { &d_d, &d_a, &d_b, &d_c };
// CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void** kernelParams, void** extra
result_cuda = cuLaunchKernel(kernel, 1, 1, 1, 1, 1, 1, 0, 0 /*cudaStreamDefault*/, args, 0);
if (result_cuda != CUDA_SUCCESS) {
assert(0);
} else {
}
#endif
// verify no errors
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cout << "CUDA ERROR: " << cudaGetErrorString(result);
return false;
}
tensor_D_computed.sync_host();
//
// Reference implementation
//
//tensor_D_reference.fill(tensor_C.host_view());
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC> reference_gemm;
reference_gemm(
{Shape::kM, Shape::kN, Shape::kK},
ElementC(1),
tensor_A.host_ref(),
tensor_B.host_ref(),
ElementC(0),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
if(!passed) std::cout
<< "A:\n" << tensor_A.host_view() << "\n\n"
<< "B:\n" << tensor_B.host_view() << "\n\n"
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << std::endl;
std::cout << "passed " << passed << std::endl;
return passed;
}
};
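// Illustrative usage of the testbed (the shape, types, and traits string below are assumptions
// for the sake of example, not taken from an actual unit test):
//
//   test::nvrtc::thread::Testbed<
//     cutlass::gemm::GemmShape<8, 8, 1>,
//     float, cutlass::layout::ColumnMajor,
//     float, cutlass::layout::ColumnMajor,
//     float, cutlass::layout::ColumnMajor> testbed;
//
//   bool passed = testbed.run(
//     "cutlass::gemm::thread::Mma<"
//     "cutlass::gemm::GemmShape<8, 8, 1>, "
//     "float, cutlass::layout::ColumnMajor, "
//     "float, cutlass::layout::ColumnMajor, "
//     "float, cutlass::layout::ColumnMajor>");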
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace nvrtc
} // namespace test
| test/unit/nvrtc/thread/testbed.h/0 | {
"file_path": "test/unit/nvrtc/thread/testbed.h",
"repo_id": "test",
"token_count": 5152
} | 59 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief CUTLASS Library is an object-oriented approach to managing operations implemented by CUTLASS.
Generally,
description - compile-time constant parameters used to instantiate an operation
configuration - runtime parameters with computationally expensive initialization
arguments - runtime parameters that may be passed to an initialized operation with low
computational overhead
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/library/library.h"
#include "cutlass/library/arch_mappings.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T> struct NumericTypeMap;
template <> struct NumericTypeMap<void> {
static NumericTypeID const kId = NumericTypeID::kVoid;
};
template <> struct NumericTypeMap<cutlass::uint1b_t> {
static NumericTypeID const kId = NumericTypeID::kB1;
};
template <> struct NumericTypeMap<cutlass::int4b_t> {
static NumericTypeID const kId = NumericTypeID::kS4;
};
template <> struct NumericTypeMap<int8_t> {
static NumericTypeID const kId = NumericTypeID::kS8;
};
template <> struct NumericTypeMap<int16_t> {
static NumericTypeID const kId = NumericTypeID::kS16;
};
template <> struct NumericTypeMap<int32_t> {
static NumericTypeID const kId = NumericTypeID::kS32;
};
template <> struct NumericTypeMap<int64_t> {
static NumericTypeID const kId = NumericTypeID::kS64;
};
template <> struct NumericTypeMap<cutlass::uint4b_t> {
static NumericTypeID const kId = NumericTypeID::kU4;
};
template <> struct NumericTypeMap<uint8_t> {
static NumericTypeID const kId = NumericTypeID::kU8;
};
template <> struct NumericTypeMap<cutlass::float_e4m3_t> {
static NumericTypeID const kId = NumericTypeID::kFE4M3;
};
template <> struct NumericTypeMap<cutlass::float_e5m2_t> {
static NumericTypeID const kId = NumericTypeID::kFE5M2;
};
template <> struct NumericTypeMap<uint16_t> {
static NumericTypeID const kId = NumericTypeID::kU16;
};
template <> struct NumericTypeMap<uint32_t> {
static NumericTypeID const kId = NumericTypeID::kU32;
};
template <> struct NumericTypeMap<uint64_t> {
static NumericTypeID const kId = NumericTypeID::kU64;
};
template <> struct NumericTypeMap<cutlass::half_t> {
static NumericTypeID const kId = NumericTypeID::kF16;
};
template <> struct NumericTypeMap<float> {
static NumericTypeID const kId = NumericTypeID::kF32;
};
template <> struct NumericTypeMap<double> {
static NumericTypeID const kId = NumericTypeID::kF64;
};
template <> struct NumericTypeMap<cutlass::complex<cutlass::half_t> > {
static NumericTypeID const kId = NumericTypeID::kCF16;
};
template <> struct NumericTypeMap<cutlass::complex<float> > {
static NumericTypeID const kId = NumericTypeID::kCF32;
};
template <> struct NumericTypeMap<cutlass::complex<double> > {
static NumericTypeID const kId = NumericTypeID::kCF64;
};
template <> struct NumericTypeMap<cutlass::bfloat16_t> {
static NumericTypeID const kId = NumericTypeID::kBF16;
};
template <> struct NumericTypeMap<cutlass::tfloat32_t> {
static NumericTypeID const kId = NumericTypeID::kTF32;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T> struct MathOperationMap {
static MathOperationID const kId = MathOperationID::kInvalid;
};
template <> struct MathOperationMap<cutlass::arch::OpMultiplyAdd> {
static MathOperationID const kId = MathOperationID::kMultiplyAdd;
};
template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddFastBF16> {
static MathOperationID const kId = MathOperationID::kMultiplyAddFastBF16;
};
template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddFastF16> {
static MathOperationID const kId = MathOperationID::kMultiplyAddFastF16;
};
template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddSaturate> {
static MathOperationID const kId = MathOperationID::kMultiplyAddSaturate;
};
template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddMixedInputUpcast> {
static MathOperationID const kId = MathOperationID::kMultiplyAddMixedInputUpcast;
};
template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddComplex> {
static MathOperationID const kId = MathOperationID::kMultiplyAddComplex;
};
template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddGaussianComplex> {
static MathOperationID const kId = MathOperationID::kMultiplyAddGaussianComplex;
};
template <> struct MathOperationMap<cutlass::arch::OpXorPopc> {
static MathOperationID const kId = MathOperationID::kXorPopc;
};
template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddFastF32> {
static MathOperationID const kId = MathOperationID::kMultiplyAddFastF32;
};
template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddComplexFastF32> {
static MathOperationID const kId = MathOperationID::kMultiplyAddComplexFastF32;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T> struct LayoutMap;
template <> struct LayoutMap<cutlass::layout::ColumnMajor> {
static LayoutTypeID const kId = LayoutTypeID::kColumnMajor;
};
template <> struct LayoutMap<cutlass::layout::RowMajor> {
static LayoutTypeID const kId = LayoutTypeID::kRowMajor;
};
template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<2>> {
static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK2;
};
template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<2>> {
static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK2;
};
template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<4>> {
static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK4;
};
template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<4>> {
static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK4;
};
template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<16>> {
static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK16;
};
template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<16>> {
static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK16;
};
template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<32>> {
static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK32;
};
template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<32>> {
static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK32;
};
template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<64>> {
static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK64;
};
template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<64>> {
static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK64;
};
template <> struct LayoutMap<cutlass::layout::TensorNHWC> {
static LayoutTypeID const kId = LayoutTypeID::kTensorNHWC;
};
template <> struct LayoutMap<cutlass::layout::TensorNDHWC> {
static LayoutTypeID const kId = LayoutTypeID::kTensorNDHWC;
};
template <> struct LayoutMap<cutlass::layout::TensorNCxHWx<32>> {
static LayoutTypeID const kId = LayoutTypeID::kTensorNC32HW32;
};
template <> struct LayoutMap<cutlass::layout::TensorNCxHWx<64>> {
static LayoutTypeID const kId = LayoutTypeID::kTensorNC64HW64;
};
template <> struct LayoutMap<cutlass::layout::TensorCxRSKx<32>> {
static LayoutTypeID const kId = LayoutTypeID::kTensorC32RSK32;
};
template <> struct LayoutMap<cutlass::layout::TensorCxRSKx<64>> {
static LayoutTypeID const kId = LayoutTypeID::kTensorC64RSK64;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T> struct OpcodeClassMap;
template <> struct OpcodeClassMap<arch::OpClassSimt> {
static OpcodeClassID const kId = OpcodeClassID::kSimt;
};
template <> struct OpcodeClassMap<arch::OpClassTensorOp> {
static OpcodeClassID const kId = OpcodeClassID::kTensorOp;
};
template <> struct OpcodeClassMap<arch::OpClassSparseTensorOp> {
static OpcodeClassID const kId = OpcodeClassID::kSparseTensorOp;
};
template <> struct OpcodeClassMap<arch::OpClassWmmaTensorOp> {
static OpcodeClassID const kId = OpcodeClassID::kWmmaTensorOp;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <cutlass::ComplexTransform Transform> struct ComplexTransformMap;
template <> struct ComplexTransformMap<cutlass::ComplexTransform::kNone> {
static cutlass::library::ComplexTransform const kId = cutlass::library::ComplexTransform::kNone;
};
template <> struct ComplexTransformMap<cutlass::ComplexTransform::kConjugate> {
static cutlass::library::ComplexTransform const kId = cutlass::library::ComplexTransform::kConjugate;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <cutlass::conv::Mode T> struct ConvModeMap;
template <> struct ConvModeMap<conv::Mode::kCrossCorrelation> {
static ConvModeID const kId = ConvModeID::kCrossCorrelation;
};
template <> struct ConvModeMap<conv::Mode::kConvolution> {
static ConvModeID const kId = ConvModeID::kConvolution;
};
template <cutlass::conv::Operator T> struct ConvKindMap;
template <> struct ConvKindMap<conv::Operator::kFprop> {
static ConvKind const kId = ConvKind::kFprop;
};
template <> struct ConvKindMap<conv::Operator::kDgrad> {
static ConvKind const kId = ConvKind::kDgrad;
};
template <> struct ConvKindMap<conv::Operator::kWgrad> {
static ConvKind const kId = ConvKind::kWgrad;
};
template <cutlass::conv::IteratorAlgorithm T> struct IteratorAlgorithmMap;
template <> struct IteratorAlgorithmMap<conv::IteratorAlgorithm::kAnalytic> {
static IteratorAlgorithmID const kId = IteratorAlgorithmID::kAnalytic;
};
template <> struct IteratorAlgorithmMap<conv::IteratorAlgorithm::kOptimized> {
static IteratorAlgorithmID const kId = IteratorAlgorithmID::kOptimized;
};
template <> struct IteratorAlgorithmMap<conv::IteratorAlgorithm::kFixedChannels> {
static IteratorAlgorithmID const kId = IteratorAlgorithmID::kFixedChannels;
};
template <> struct IteratorAlgorithmMap<conv::IteratorAlgorithm::kFewChannels> {
static IteratorAlgorithmID const kId = IteratorAlgorithmID::kFewChannels;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Layout>
TensorDescription make_TensorDescription(int alignment = 1) {
TensorDescription desc;
desc.element = NumericTypeMap<Element>::kId;
desc.layout = LayoutMap<Layout>::kId;
desc.alignment = alignment;
desc.log_extent_range = int(sizeof(typename Layout::TensorCoord::Index) - 1) * 8;
desc.log_stride_range = int(sizeof(typename Layout::Stride::Index) - 1) * 8;
return desc;
}
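// Illustrative usage sketch (not part of the original header); the element type, layout, and
// alignment below are arbitrary choices:
//
//   TensorDescription desc = make_TensorDescription<cutlass::half_t, cutlass::layout::TensorNHWC>(8);
//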
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/library_internal.h/0 | {
"file_path": "tools/library/src/library_internal.h",
"repo_id": "tools",
"token_count": 3995
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines profiling functionality for convolution
*/
#pragma once
#include <vector>
#include <string>
#include <memory>
#include <algorithm>
#include <unordered_map>
// CUTLASS Library includes
#include "cutlass/library/library.h"
#include "cutlass/library/util.h"
#include "cutlass/library/handle.h"
#include "cutlass/library/manifest.h"
#include "cutlass/library/singleton.h"
// Profiler includes
#include "options.h"
#include "device_context.h"
#include "operation_profiler.h"
#include "performance_result.h"
#include "problem_space.h"
#include "reduction_operation_profiler.h"
#if CUTLASS_ENABLE_CUDNN
#include "cudnn_helpers.h"
#endif //#if CUTLASS_ENABLE_CUDNN
#include "debug.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Operation profiler for conv2d math functions
class Conv2dOperationProfiler : public OperationProfiler {
public:
/// Problem structure obtained from problem space
struct Conv2dProblem {
int64_t n, h, w, c, p, q, k, r, s;
int64_t groups;
int64_t pad_h, pad_w;
int64_t stride_h, stride_w;
int64_t dilation_h, dilation_w;
std::vector<uint8_t> alpha;
std::vector<uint8_t> beta;
library::SplitKMode split_k_mode;
int64_t split_k_slices;
library::ConvModeID conv_mode;
library::Provider eq_gemm_provider;
    // For convolution with a parallel (interleaved) reduction pass, the convolution epilogue
    // uses (alpha, beta) = (1.0, 0.0) and the reduction epilogue uses
    // (alpha, beta) = (Conv2dProblem::alpha, Conv2dProblem::beta).
std::vector<uint8_t> alpha_one;
std::vector<uint8_t> beta_zero;
//
// Methods
//
/// Total number of bytes loaded
int64_t bytes(library::ConvDescription const &operation_desc) const;
/// Total number of flops computed
int64_t flops(library::ConvDescription const &operation_desc) const;
void set_default_output_size() {
p = ((h + pad_h - r * dilation_h) / stride_h) + 1;
q = ((w + pad_w - s * dilation_w) / stride_w) + 1;
}
// Returns equivalent gemm problem size for convolution
cutlass::gemm::GemmCoord eq_gemm_size(library::ConvKind const &conv_kind) const {
switch (conv_kind) {
case library::ConvKind::kFprop: return cutlass::gemm::GemmCoord(int(n * p * q), int(k), int(r * s * c / groups));
case library::ConvKind::kDgrad: return cutlass::gemm::GemmCoord(int(n * h * w), int(c), int(k * r * s));
case library::ConvKind::kWgrad: return cutlass::gemm::GemmCoord(int(k), int(r * s * c), int(n * p * q));
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
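    // Illustrative example (not part of the original header): for an fprop problem with
    // n=2, h=w=56, c=64, k=128, r=s=3, groups=1 and p=q=56, eq_gemm_size(kFprop) yields
    // GemmCoord(M, N, K) = (n*p*q, k, r*s*c/groups) = (2*56*56, 128, 3*3*64) = (6272, 128, 576).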
// Returns extent for tensor A
std::vector<int> extent_a(library::ConvKind const &conv_kind) const {
switch (conv_kind) {
case library::ConvKind::kFprop: return {int(n), int(h), int(w), int(c)};
case library::ConvKind::kDgrad: return {int(n), int(p), int(q), int(k)};
case library::ConvKind::kWgrad: return {int(n), int(p), int(q), int(k)};
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
// Returns extent for tensor B
std::vector<int> extent_b(library::ConvKind const &conv_kind) const {
switch (conv_kind) {
case library::ConvKind::kFprop: return {int(k), int(r), int(s), int(c / groups)};
case library::ConvKind::kDgrad: return {int(k), int(r), int(s), int(c)};
case library::ConvKind::kWgrad: return {int(n), int(h), int(w), int(c)};
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
// Returns extent for tensor C
std::vector<int> extent_c(library::ConvKind const &conv_kind) const {
switch (conv_kind) {
case library::ConvKind::kFprop: return {int(n), int(p), int(q), int(k)};
case library::ConvKind::kDgrad: return {int(n), int(h), int(w), int(c)};
case library::ConvKind::kWgrad: return {int(k), int(r), int(s), int(c)};
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
// Returns layout for equivalent gemm matrix A
library::LayoutTypeID eq_gemm_layout_a(library::ConvKind const &conv_kind) const {
switch (conv_kind) {
case library::ConvKind::kFprop: return library::LayoutTypeID::kRowMajor; // TN Gemm
case library::ConvKind::kDgrad: return library::LayoutTypeID::kRowMajor; // TT Gemm
case library::ConvKind::kWgrad: return library::LayoutTypeID::kColumnMajor; // NT Gemm
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
// Returns layout for equivalent gemm matrix B
library::LayoutTypeID eq_gemm_layout_b(library::ConvKind const &conv_kind) const {
switch (conv_kind) {
case library::ConvKind::kFprop: return library::LayoutTypeID::kColumnMajor; // TN Gemm
case library::ConvKind::kDgrad: return library::LayoutTypeID::kRowMajor; // TT Gemm
case library::ConvKind::kWgrad: return library::LayoutTypeID::kRowMajor; // NT Gemm
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
// Returns layout for equivalent gemm matrix C
library::LayoutTypeID eq_gemm_layout_c(library::ConvKind const &conv_kind) const {
switch (conv_kind) {
// Gemm operator assumes column-major output
case library::ConvKind::kFprop:
case library::ConvKind::kDgrad:
case library::ConvKind::kWgrad: return library::LayoutTypeID::kColumnMajor;
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
// Returns leading dimension for equivalent gemm matrix A
int64_t eq_gemm_lda(library::ConvKind const &conv_kind) const {
switch (conv_kind) {
case library::ConvKind::kFprop: return eq_gemm_size(conv_kind).k();
case library::ConvKind::kDgrad: return eq_gemm_size(conv_kind).k();
case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).m();
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
// Returns leading dimension for equivalent gemm matrix B
int64_t eq_gemm_ldb(library::ConvKind const &conv_kind) const {
switch (conv_kind) {
case library::ConvKind::kFprop: return eq_gemm_size(conv_kind).k();
case library::ConvKind::kDgrad: return eq_gemm_size(conv_kind).n();
case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).n();
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
// Returns leading dimension for equivalent gemm matrix C
int64_t eq_gemm_ldc(library::ConvKind const &conv_kind) const {
switch (conv_kind) {
case library::ConvKind::kFprop:
case library::ConvKind::kDgrad:
case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).m();
default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
};
/// Workspace used
struct Conv2dWorkspace {
/// Conv device allocations
DeviceAllocation *A;
DeviceAllocation *B;
DeviceAllocation *reordered_B;
DeviceAllocation *C;
DeviceAllocation *Computed;
DeviceAllocation *Reference;
/// Library configuration and arguments for convolution operator
library::Conv2dConfiguration configuration;
library::ConvArguments arguments;
/// Number of copies of the problem workspace which are visited sequentially during
/// profiling to avoid camping in the last level cache.
int problem_count;
/// Buffer used for the cutlass conv2d operations' host workspace
std::vector<uint8_t> host_workspace;
/// Buffer used for the cutlass operations' device workspace
DeviceAllocation device_workspace;
/// Library configuration and arguments for reduction operator
library::ReductionConfiguration reduction_configuration;
library::ReductionArguments reduction_arguments;
/// Buffer used for the cutlass reduction operations' host workspace
std::vector<uint8_t> reduction_host_workspace;
/// Host data buffers for host reference operation
/// host buffer for tensor
std::vector<uint8_t> host_tensor_a;
/// host buffer for tensor b
std::vector<uint8_t> host_tensor_b;
/// host buffer for tensor c
std::vector<uint8_t> host_tensor_c;
//
// Methods
//
Conv2dWorkspace()
: A(nullptr),
B(nullptr),
reordered_B(nullptr),
C(nullptr),
Computed(nullptr),
Reference(nullptr) {}
// Set stride vector for tensor activations, filters, output
void set_stride_vector(Conv2dProblem const &problem,
library::ConvKind const &conv_kind,
library::LayoutTypeID const &layout_a,
library::LayoutTypeID const &layout_b,
library::LayoutTypeID const &layout_c) {
std::vector<int64_t> stride_activations;
std::vector<int64_t> stride_filters;
std::vector<int64_t> stride_output;
// Strides for interleaved fprop
if (conv_kind == library::ConvKind::kFprop &&
((layout_a == library::LayoutTypeID::kTensorNC32HW32 &&
layout_b == library::LayoutTypeID::kTensorC32RSK32 &&
layout_c == library::LayoutTypeID::kTensorNC32HW32) ||
(layout_a == library::LayoutTypeID::kTensorNC64HW64 &&
layout_b == library::LayoutTypeID::kTensorC64RSK64 &&
layout_c == library::LayoutTypeID::kTensorNC64HW64))) {
int interleave =
(layout_a == library::LayoutTypeID::kTensorNC32HW32) ? 32 : 64;
stride_activations.push_back(int(problem.w) * interleave);
stride_activations.push_back(int(problem.w) * int(problem.h) *
interleave);
stride_activations.push_back(int(problem.h) * int(problem.w) *
int(problem.c));
stride_filters.push_back(int(problem.k) * interleave);
stride_filters.push_back(int(problem.k) * int(problem.s) * interleave);
stride_filters.push_back(int(problem.k) * int(problem.s) *
int(problem.r) * interleave);
stride_output.push_back(int(problem.q) * interleave);
stride_output.push_back(int(problem.q) * int(problem.p) * interleave);
stride_output.push_back(int(problem.q) * int(problem.p) *
int(problem.k));
} else {
// Strides for the rest cases
stride_activations.push_back(int(problem.c));
stride_activations.push_back(int(problem.w) * int(problem.c));
stride_activations.push_back(int(problem.h) * int(problem.w) *
int(problem.c));
stride_filters.push_back(int(problem.c / problem.groups));
stride_filters.push_back(int(problem.s) * int(problem.c / problem.groups));
stride_filters.push_back(int(problem.r) * int(problem.s) *
int(problem.c / problem.groups));
stride_output.push_back(int(problem.k));
stride_output.push_back(int(problem.q) * int(problem.k));
stride_output.push_back(int(problem.q) * int(problem.p) *
int(problem.k));
}
switch (conv_kind) {
case library::ConvKind::kFprop:
configuration.stride_a = stride_activations;
configuration.stride_b = stride_filters;
configuration.stride_c = stride_output;
break;
case library::ConvKind::kDgrad:
configuration.stride_a = stride_output;
configuration.stride_b = stride_filters;
configuration.stride_c = stride_activations;
break;
case library::ConvKind::kWgrad:
configuration.stride_a = stride_output;
configuration.stride_b = stride_activations;
configuration.stride_c = stride_filters;
break;
default:
throw std::runtime_error(
"Invalid Conv Operator (fprop, dgrad, wgrad)");
}
}
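    // Illustrative note (not part of the original header): for an NHWC fprop problem with
    // h=w=56, c=64, k=128, r=s=3, p=q=56 and groups=1, the packed strides computed above are
    //   stride_activations = {64, 56*64, 56*56*64}    = {64, 3584, 200704}
    //   stride_filters     = {64, 3*64, 3*3*64}       = {64, 192, 576}
    //   stride_output      = {128, 56*128, 56*56*128} = {128, 7168, 401408}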
};
protected:
//
// Data members
//
/// CONV problem obtained from problem space
Conv2dProblem problem_;
/// Device memory allocations
Conv2dWorkspace conv_workspace_;
/// CUTLASS parallel reduction operation to follow this* conv2d operation
library::Operation const *reduction_op_;
public:
//
// Methods
//
/// Ctor
Conv2dOperationProfiler(Options const &options);
/// Destructor
virtual ~Conv2dOperationProfiler();
Conv2dProblem const& problem() const { return problem_; }
/// Prints usage statement for the math function
virtual void print_usage(std::ostream &out) const;
/// Prints examples
virtual void print_examples(std::ostream &out) const;
/// Extracts the problem dimensions
virtual Status initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Initializes workspace
virtual Status initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Verifies CUTLASS against references
virtual bool verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Measures performance results
virtual bool profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
protected:
/// Method to profile an initialized CUTLASS operation
virtual Status profile_cutlass_(
double &runtime,
Options const &options,
library::Operation const *operation,
void *arguments,
void *host_workspace,
void *device_workspace);
/// Initialize reduction problem dimensions and library::Operation
bool initialize_reduction_configuration_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Initializes the performance result
void initialize_result_(
PerformanceResult &result,
Options const &options,
library::ConvDescription const &operation_desc,
ProblemSpace const &problem_space);
/// Verifies CUTLASS against host reference
bool verify_with_host_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
/// Verifies CUTLASS against device reference
bool verify_with_device_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
#if CUTLASS_ENABLE_CUDNN
/// Verifies CUTLASS against cudnn reference
bool verify_with_cudnn_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem);
#endif //#if CUTLASS_ENABLE_CUDNN
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/include/cutlass/profiler/conv2d_operation_profiler.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/conv2d_operation_profiler.h",
"repo_id": "tools",
"token_count": 6686
} | 61 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <stdio.h>
#include "cutlass/cutlass.h"
/**
* \file
* \brief C++ interface to dump fragments and shared memory contents for
* debugging.
*/
namespace cutlass {
namespace debug {
/******************************************************************************
* Dump the fragments
******************************************************************************/
/// The first N threads dump the first M elements from their fragments with a
/// stride of S elements. If N is not specified, dump the data of all the
/// threads. If M is not specified, dump all the elements of the fragment.
template <typename Fragment>
CUTLASS_DEVICE void dump_fragment(Fragment const& frag, int N = 0, int M = 0,
int S = 1) {
int total_threads = blockDim.x * blockDim.y * blockDim.z;
int block_id =
blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int thread_id = (threadIdx.z * (blockDim.x * blockDim.y)) +
(threadIdx.y * blockDim.x) + threadIdx.x;
if (N < 0 || N > total_threads) {
if (thread_id == 0 && block_id == 0)
printf("Thread number N = %d should between [1, %d].\n", N,
total_threads);
__syncthreads();
return;
}
int total_elements = int(frag.size());
if (M < 0 || M > total_elements) {
if (thread_id == 0 && block_id == 0)
printf("Element number M = %d should between [1, %d].\n", M,
total_elements);
__syncthreads();
return;
}
if (N == 0) N = total_threads;
if (M == 0) M = total_elements;
if (S < 1 || S > M) {
if (thread_id == 0 && block_id == 0)
printf("Stride S = %d should between [1, %d].\n", S, M);
__syncthreads();
return;
}
if (thread_id == 0 && block_id == 0)
printf("\n*******************Dumping the fragments*******************\n\n");
CUTLASS_PRAGMA_NO_UNROLL
for (int tid = 0; tid < N; ++tid) {
if (tid == thread_id) {
printf("TB%d W%d T%d: ", block_id, tid / 32, tid & 31);
CUTLASS_PRAGMA_NO_UNROLL
for (int i = 0; i < M; i += S) {
printf("%.0f ", float(typename Fragment::value_type(frag[i])));
}
printf("\n");
}
__syncthreads();
}
if (thread_id == 0 && block_id == 0)
printf("\n***********************************************************\n\n");
__syncthreads();
return;
}
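// Example usage (an illustrative sketch, not part of the original header). Inside a kernel that
// already owns a fragment named "frag" (assumed here), the first 8 threads print their first
// 4 elements:
//
//   cutlass::debug::dump_fragment(frag, /*N=*/8, /*M=*/4);
//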
/******************************************************************************
* Dump the shared memory
******************************************************************************/
#define SHMEM_ROW_SIZE 128
/// Dump the shared memory contents. ptr is the begin address, size specifies
/// the number of elements that need to be dumped, and S specifies the stride.
template <typename Element>
CUTLASS_DEVICE void dump_shmem(Element const* ptr, size_t size, int S = 1) {
int block_id =
blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int thread_id = (threadIdx.z * (blockDim.x * blockDim.y)) +
(threadIdx.y * blockDim.x) + threadIdx.x;
if (ptr == nullptr) {
if (thread_id == 0 && block_id == 0) printf("ptr is null.\n");
__syncthreads();
return;
}
if (size < 1) {
if (thread_id == 0 && block_id == 0)
printf("Element size is less than 1\n");
__syncthreads();
return;
}
int row_elements = SHMEM_ROW_SIZE / sizeof(Element);
if (S < 1 || S > row_elements) {
if (thread_id == 0 && block_id == 0)
printf("Stride S = %d should between [1, %d].\n", S, row_elements);
__syncthreads();
return;
}
__syncthreads();
if (thread_id == 0)
printf("\n********Dumping the shared memory of TB %d*******\n\n", block_id);
if (thread_id == 0) {
for (int i = 0; i < size; i += row_elements) {
for (int j = 0; j < row_elements; j += S) {
printf("%.0f ", float(ptr[i + j]));
}
printf("\n");
}
}
if (thread_id == 0)
printf("\n***********************************************************\n\n");
__syncthreads();
return;
}
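// Example usage (an illustrative sketch, not part of the original header). Given a shared-memory
// tile "smem_ptr" of 4096 elements (assumed to exist in the calling kernel), print every 8th
// element:
//
//   cutlass::debug::dump_shmem(smem_ptr, 4096, /*S=*/8);
//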
} // namespace debug
} // namespace cutlass
| tools/util/include/cutlass/util/device_dump.h/0 | {
"file_path": "tools/util/include/cutlass/util/device_dump.h",
"repo_id": "tools",
"token_count": 2057
} | 62 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/*! \file
    \brief HostTensorPlanarComplex provides management for both host and device memory.
    It allocates host and device memory upon construction. Basic element-wise operations on
host memory synchronize device memory automatically. Explicit copy operations provide abstractions
for CUDA memcpy operations.
Call {host, device}_{data, ref, view}() for accessing host or device memory.
See cutlass/tensor_ref.h and cutlass/tensor_view.h for more details.
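    Example usage (an illustrative sketch, not part of the original comment; the element type
    and extent below are arbitrary):
      cutlass::HostTensorPlanarComplex<float, cutlass::layout::ColumnMajor> tensor({128, 64});
      tensor.host_view_real().at({0, 0}) = 1.5f;   // write real part on the host
      tensor.host_view_imag().at({0, 0}) = -0.5f;  // write imaginary part on the host
      tensor.sync_device();                        // copy both planes to device memory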
*/
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref_planar_complex.h"
#include "cutlass/tensor_view_planar_complex.h"
#include "device_memory.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Host tensor
template <
/// Data type of element stored within tensor (concept: NumericType)
typename Element_,
/// Defines a mapping from logical coordinate to linear memory (concept: Layout)
typename Layout_
>
class HostTensorPlanarComplex {
public:
/// Data type of individual access
using Element = Element_;
/// Mapping function from logical coordinate to linear memory
using Layout = Layout_;
/// Logical rank of tensor index space
static int const kRank = Layout::kRank;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Layout's stride vector
using Stride = typename Layout::Stride;
/// Tensor reference to device memory
using TensorRef = TensorRefPlanarComplex<Element, Layout>;
/// Tensor reference to constant device memory
using ConstTensorRef = typename TensorRef::ConstTensorRef;
/// Tensor reference to device memory
using TensorView = TensorViewPlanarComplex<Element, Layout>;
/// Tensor reference to constant device memory
using ConstTensorView = typename TensorView::ConstTensorView;
/// Reference to element in tensor
using Reference = typename TensorRef::Reference;
/// Constant reference to element in tensor
using ConstReference = typename ConstTensorRef::Reference;
private:
//
// Data members
//
/// Extent of tensor in logical dimensions
TensorCoord extent_;
/// Layout object
Layout layout_;
/// Host-side memory allocation
std::vector<Element> host_;
/// Device-side memory
device_memory::allocation<Element> device_;
public:
//
// Device and Host Methods
//
/// Default constructor
HostTensorPlanarComplex() {}
/// Constructs a tensor given an extent. Assumes a packed layout
HostTensorPlanarComplex(
TensorCoord const &extent,
bool device_backed = true
) {
this->reset(extent, Layout::packed(extent), device_backed);
}
/// Constructs a tensor given an extent and layout
HostTensorPlanarComplex(
TensorCoord const &extent,
Layout const &layout,
bool device_backed = true
) {
this->reset(extent, layout, device_backed);
}
~HostTensorPlanarComplex() { }
/// Clears the HostTensor allocation to size/capacity = 0
void reset() {
extent_ = TensorCoord();
layout_ = Layout::packed(extent_);
host_.clear();
device_.reset();
}
/// Resizes internal memory allocations without affecting layout or extent
void reserve(
size_t count, ///< size of tensor in elements
bool device_backed_ = true) { ///< if true, device memory is also allocated
device_.reset();
host_.clear();
host_.resize(count * 2);
// Allocate memory
Element* device_memory = nullptr;
if (device_backed_) {
device_memory = device_memory::allocate<Element>(count * 2);
}
device_.reset(device_memory, device_backed_ ? count * 2 : 0);
}
/// Updates the extent and layout of the HostTensor. Allocates memory according to the new
/// extent and layout.
void reset(
TensorCoord const &extent, ///< extent of logical tensor
Layout const &layout, ///< layout object of tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
extent_ = extent;
layout_ = layout;
reserve(size_t(layout_.capacity(extent_)), device_backed_);
}
/// Updates the extent and layout of the HostTensor. Allocates memory according to the new
/// extent and layout. Assumes a packed tensor configuration.
void reset(
TensorCoord const &extent, ///< extent of logical tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
reset(extent, Layout::packed(extent), device_backed_);
}
/// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity.
/// To force allocation, call reset().
void resize(
TensorCoord const &extent, ///< extent of logical tensor
Layout const &layout, ///< layout object of tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
extent_ = extent;
layout_ = layout;
LongIndex new_size = size_t(layout_.capacity(extent_));
if (static_cast<decltype(host_.size())>(new_size * 2) > host_.size()) {
reserve(new_size);
}
}
/// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity.
/// To force allocation, call reset(). Note, this form of resize() assumes a packed tensor configuration.
void resize(
TensorCoord const &extent, ///< extent of logical tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
resize(extent, Layout::packed(extent), device_backed_);
}
/// Returns the number of elements stored in the host tensor
size_t size() const {
return host_.size() / 2;
}
/// Returns the logical capacity based on extent and layout. May differ from size().
LongIndex capacity() const {
return layout_.capacity(extent_);
}
/// Stride between real and imaginary parts
LongIndex imaginary_stride() const {
return host_.size() / 2;
}
/// Gets pointer to host data
Element * host_data() { return host_.data(); }
/// Gets pointer to host data imaginary part
Element * host_data_imag() { return host_.data() + imaginary_stride(); }
/// Gets pointer to host data with a pointer offset
Element * host_data_ptr_offset(LongIndex ptr_element_offset) { return host_data() + ptr_element_offset; }
/// Gets pointer to host data with a pointer offset
Element * host_data_imag_ptr_offset(LongIndex ptr_element_offset) { return host_data_imag() + ptr_element_offset; }
/// Gets a reference to an element in host memory
Reference host_data(LongIndex idx) {
return PlanarComplexReference<Element>(host_data() + idx, host_data_imag() + idx);
}
/// Gets pointer to host data
Element const * host_data() const { return host_.data(); }
/// Gets pointer to host data imaginary part
Element const * host_data_imag() const { return host_.data() + imaginary_stride(); }
/// Gets a constant reference to an element in host memory
ConstReference host_data(LongIndex idx) const {
return PlanarComplexReference<Element const>(host_data() + idx, host_data_imag() + idx);
}
/// Gets pointer to device data
Element * device_data() { return device_.get(); }
/// Gets pointer to device data with a pointer offset
Element * device_data_ptr_offset(LongIndex ptr_element_offset) { return device_.get() + ptr_element_offset; }
/// Gets pointer to device data
Element const * device_data() const { return device_.get(); }
/// Gets pointer to device data with a pointer offset
Element const * device_data_ptr_offset(LongIndex ptr_element_offset) const { return device_.get() + ptr_element_offset; }
/// Gets a pointer to the device data imaginary part
Element * device_data_imag() { return device_.get() + imaginary_stride(); }
/// Accesses the tensor reference pointing to data
TensorRef host_ref(LongIndex ptr_element_offset=0) {
return TensorRef(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride());
}
/// Returns a tensor reference to the real part of the tensor
cutlass::TensorRef<Element, Layout> host_ref_real() {
return cutlass::TensorRef<Element, Layout>(host_data(), layout_);
}
  /// Returns a tensor reference to the imaginary part of the tensor
cutlass::TensorRef<Element, Layout> host_ref_imag() {
return cutlass::TensorRef<Element, Layout>(host_data_ptr_offset(imaginary_stride()), layout_);
}
/// Accesses the tensor reference pointing to data
ConstTensorRef host_ref(LongIndex ptr_element_offset=0) const {
return ConstTensorRef(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride());
}
/// Accesses the tensor reference pointing to data
TensorRef device_ref(LongIndex ptr_element_offset=0) {
return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride());
}
/// Accesses the tensor reference pointing to data
ConstTensorRef device_ref(LongIndex ptr_element_offset=0) const {
return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride());
}
/// Returns a tensor reference to the real part of the tensor
cutlass::TensorRef<Element, Layout> device_ref_real() {
return cutlass::TensorRef<Element, Layout>(device_data(), layout_);
}
  /// Returns a tensor reference to the imaginary part of the tensor
cutlass::TensorRef<Element, Layout> device_ref_imag() {
return cutlass::TensorRef<Element, Layout>(device_data_ptr_offset(imaginary_stride()), layout_);
}
  /// Accesses the tensor view pointing to host data
TensorView host_view(LongIndex ptr_element_offset=0) {
return TensorView(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_);
}
  /// Accesses the constant tensor view pointing to host data
ConstTensorView host_view(LongIndex ptr_element_offset=0) const {
return ConstTensorView(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_);
}
  /// Returns a tensor view of the real part of the tensor in host memory
cutlass::TensorView<Element, Layout> host_view_real() {
return cutlass::TensorView<Element, Layout>(host_data(), layout_, extent_);
}
  /// Returns a tensor view of the imaginary part of the tensor in host memory
cutlass::TensorView<Element, Layout> host_view_imag() {
return cutlass::TensorView<Element, Layout>(host_data_ptr_offset(imaginary_stride()), layout_, extent_);
}
  /// Accesses the tensor view pointing to device data
TensorView device_view(LongIndex ptr_element_offset=0) {
return TensorView(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_);
}
  /// Accesses the constant tensor view pointing to device data
ConstTensorView device_view(LongIndex ptr_element_offset=0) const {
return ConstTensorView(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_);
}
  /// Returns a tensor view of the real part of the tensor in device memory
cutlass::TensorView<Element, Layout> device_view_real() {
return cutlass::TensorView<Element, Layout>(device_data(), layout_, extent_);
}
  /// Returns a tensor view of the imaginary part of the tensor in device memory
cutlass::TensorView<Element, Layout> device_view_imag() {
return cutlass::TensorView<Element, Layout>(device_data_ptr_offset(imaginary_stride()), layout_, extent_);
}
/// Returns true if device memory is allocated
bool device_backed() const {
return (device_.get() == nullptr) ? false : true;
}
/// Returns the layout object
Layout layout() const {
return layout_;
}
/// Returns the layout object's stride vector
Stride stride() const {
return layout_.stride();
}
/// Returns the layout object's stride in a given physical dimension
Index stride(int dim) const {
return layout_.stride().at(dim);
}
/// Computes the offset of an index from the origin of the tensor
LongIndex offset(TensorCoord const& coord) const {
return layout_(coord);
}
/// Returns a reference to the element at the logical Coord in host memory
Reference at(TensorCoord const& coord) {
return host_data(offset(coord));
}
/// Returns a const reference to the element at the logical Coord in host memory
ConstReference at(TensorCoord const& coord) const {
return host_data(offset(coord));
}
/// Returns the extent of the tensor
TensorCoord extent() const {
return extent_;
}
/// Returns the extent of the tensor
TensorCoord & extent() {
return extent_;
}
/// Copies data from device to host
void sync_host() {
if (device_backed()) {
device_memory::copy_to_host(
host_data(), device_data(), imaginary_stride() * 2);
}
}
/// Copies data from host to device
void sync_device() {
if (device_backed()) {
device_memory::copy_to_device(
device_data(), host_data(), imaginary_stride() * 2);
}
}
/// Copy data from a caller-supplied device pointer into host memory.
void copy_in_device_to_host(
Element const* ptr_device_real, ///< source device memory
Element const* ptr_device_imag, ///< source device memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_host(
host_data(), ptr_device_real, count);
device_memory::copy_to_host(
host_data_imag(), ptr_device_imag, count);
}
  /// Copy data from a caller-supplied device pointer into device memory.
void copy_in_device_to_device(
Element const* ptr_device_real, ///< source device memory
Element const* ptr_device_imag, ///< source device memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_device_to_device(
device_data(), ptr_device_real, count);
device_memory::copy_device_to_device(
device_data_imag(), ptr_device_imag, count);
}
  /// Copy data from a caller-supplied host pointer into device memory.
void copy_in_host_to_device(
Element const* ptr_host_real, ///< source host memory
Element const* ptr_host_imag, ///< source host memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_device(
device_data(), ptr_host_real, count);
device_memory::copy_to_device(
device_data_imag(), ptr_host_imag, count);
}
  /// Copy data from a caller-supplied host pointer into host memory.
void copy_in_host_to_host(
Element const* ptr_host_real, ///< source host memory
Element const* ptr_host_imag, ///< source host memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_host_to_host(
host_data(), ptr_host_real, count);
device_memory::copy_host_to_host(
host_data_imag(), ptr_host_imag, count);
}
  /// Copy data from device memory into a caller-supplied host pointer.
  void copy_out_device_to_host(
    Element * ptr_host_real,            ///< destination host memory
    Element * ptr_host_imag,            ///< destination host memory
    LongIndex count = -1) const {       ///< number of elements to transfer; if negative, the entire tensor is copied.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_host(
ptr_host_real, device_data(), count);
device_memory::copy_to_host(
ptr_host_imag, device_data_imag(), count);
}
  /// Copy data from device memory into a caller-supplied device pointer.
  void copy_out_device_to_device(
    Element * ptr_device_real,          ///< destination device memory
    Element * ptr_device_imag,          ///< destination device memory
    LongIndex count = -1) const {       ///< number of elements to transfer; if negative, the entire tensor is copied.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_device_to_device(
ptr_device_real, device_data(), count);
device_memory::copy_device_to_device(
ptr_device_imag, device_data_imag(), count);
}
  /// Copy data from host memory into a caller-supplied device pointer.
  void copy_out_host_to_device(
    Element * ptr_device_real,          ///< destination device memory
    Element * ptr_device_imag,          ///< destination device memory
    LongIndex count = -1) const {       ///< number of elements to transfer; if negative, the entire tensor is copied.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_device(
ptr_device_real, host_data(), count);
device_memory::copy_to_device(
ptr_device_imag, host_data_imag(), count);
}
  /// Copy data from host memory into a caller-supplied host pointer.
  void copy_out_host_to_host(
    Element * ptr_host_real,            ///< destination host memory
    Element * ptr_host_imag,            ///< destination host memory
    LongIndex count = -1) const {       ///< number of elements to transfer; if negative, the entire tensor is copied.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_host_to_host(
ptr_host_real, host_data(), count);
device_memory::copy_host_to_host(
ptr_host_imag, host_data_imag(), count);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| tools/util/include/cutlass/util/host_tensor_planar_complex.h/0 | {
"file_path": "tools/util/include/cutlass/util/host_tensor_planar_complex.h",
"repo_id": "tools",
"token_count": 6640
} | 63 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines host-side elementwise operations on TensorView.
*/
#pragma once
// Standard Library includes
#include <utility>
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/relatively_equal.h"
#include "cutlass/util/distribution.h"
#include "tensor_foreach.h"
namespace cutlass {
namespace reference {
namespace device {
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
template <typename Element>
__global__ void BlockCompareEqual(
int *equal,
Element const *ptr_A,
Element const *ptr_B,
size_t capacity) {
size_t idx = threadIdx.x + blockDim.x * blockIdx.x;
for (; idx < capacity; idx += gridDim.x * blockDim.x) {
Element a = cutlass::ReferenceFactory<Element>::get(ptr_A, idx);
Element b = cutlass::ReferenceFactory<Element>::get(ptr_B, idx);
if (a != b) {
*equal = 0;
return;
}
}
}
template <typename Element>
__global__ void BlockCompareRelativelyEqual(
int *equal,
Element const *ptr_A,
Element const *ptr_B,
size_t capacity,
Element epsilon,
Element nonzero_floor) {
size_t idx = threadIdx.x + blockDim.x * blockIdx.x;
for (; idx < capacity; idx += gridDim.x * blockDim.x) {
Element a = cutlass::ReferenceFactory<Element>::get(ptr_A, idx);
Element b = cutlass::ReferenceFactory<Element>::get(ptr_B, idx);
if (!relatively_equal(a, b, epsilon, nonzero_floor)) {
*equal = 0;
return;
}
}
}
} // namespace kernel
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Performs a bit-level equality check between two blocks
template <typename Element>
bool BlockCompareEqual(
Element const *ptr_A,
Element const *ptr_B,
size_t capacity,
int grid_size = 0,
int block_size = 0) {
int equal_flag = 1;
int *device_equal_flag = nullptr;
if (cudaMalloc((void **)&device_equal_flag, sizeof(int)) != cudaSuccess) {
throw std::runtime_error("Failed to allocate device flag.");
}
if (cudaMemcpy(
device_equal_flag,
&equal_flag,
sizeof(int),
cudaMemcpyHostToDevice) != cudaSuccess) {
throw std::runtime_error("Failed to copy equality flag to device.");
}
if (!grid_size || !block_size) {
// if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API
cudaError_t result = cudaOccupancyMaxPotentialBlockSize(
&grid_size,
&block_size,
reinterpret_cast<void const *>(kernel::BlockCompareEqual<Element>));
if (result != cudaSuccess) {
throw std::runtime_error("Failed to query occupancy.");
}
// Limit block size. This has the effect of increasing the number of items processed by a
// single thread and reduces the impact of initialization overhead.
block_size = (block_size < 128 ? block_size : 128);
}
dim3 grid(grid_size, 1, 1);
dim3 block(block_size, 1, 1);
kernel::BlockCompareEqual<Element><<< grid, block >>>(device_equal_flag, ptr_A, ptr_B, capacity);
if (cudaMemcpy(
&equal_flag,
device_equal_flag,
sizeof(int),
cudaMemcpyDeviceToHost) != cudaSuccess) {
cudaFree(device_equal_flag);
throw std::runtime_error("Failed to copy equality flag from device.");
}
cudaFree(device_equal_flag);
return equal_flag;
}
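// Example usage (an illustrative sketch, not part of the original header). Given two device
// allocations "d_expected" and "d_computed" holding "n" float elements (assumed to exist):
//
//   bool passed = cutlass::reference::device::BlockCompareEqual<float>(d_expected, d_computed, n);
//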
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Performs a relative-error equality check between two blocks
template <typename Element>
bool BlockCompareRelativelyEqual(
Element const *ptr_A,
Element const *ptr_B,
size_t capacity,
Element epsilon,
Element nonzero_floor,
int grid_size = 0,
int block_size = 0) {
int equal_flag = 1;
int *device_equal_flag = nullptr;
if (cudaMalloc((void **)&device_equal_flag, sizeof(int)) != cudaSuccess) {
throw std::runtime_error("Failed to allocate device flag.");
}
if (cudaMemcpy(
device_equal_flag,
&equal_flag,
sizeof(int),
cudaMemcpyHostToDevice) != cudaSuccess) {
throw std::runtime_error("Failed to copy equality flag to device.");
}
if (!grid_size || !block_size) {
// if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API
cudaError_t result = cudaOccupancyMaxPotentialBlockSize(
&grid_size,
&block_size,
reinterpret_cast<void const *>(kernel::BlockCompareRelativelyEqual<Element>));
if (result != cudaSuccess) {
throw std::runtime_error("Failed to query occupancy.");
}
// Limit block size. This has the effect of increasing the number of items processed by a
// single thread and reduces the impact of initialization overhead.
block_size = (block_size < 128 ? block_size : 128);
}
dim3 grid(grid_size, 1, 1);
dim3 block(block_size, 1, 1);
kernel::BlockCompareRelativelyEqual<Element><<< grid, block >>>(
device_equal_flag,
ptr_A,
ptr_B,
capacity,
epsilon,
nonzero_floor
);
if (cudaMemcpy(
&equal_flag,
device_equal_flag,
sizeof(int),
cudaMemcpyDeviceToHost) != cudaSuccess) {
cudaFree(device_equal_flag);
throw std::runtime_error("Failed to copy equality flag from device.");
}
cudaFree(device_equal_flag);
return equal_flag;
}
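// Example usage (an illustrative sketch, not part of the original header). With the same assumed
// device pointers as above, a tolerance-based comparison for float data might look like:
//
//   bool close = cutlass::reference::device::BlockCompareRelativelyEqual<float>(
//       d_expected, d_computed, n, /*epsilon=*/1e-5f, /*nonzero_floor=*/1e-6f);
//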
///////////////////////////////////////////////////////////////////////////////////////////////////
} // device
} // reference
} // cutlass
| tools/util/include/cutlass/util/reference/device/tensor_compare.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/tensor_compare.h",
"repo_id": "tools",
"token_count": 2337
} | 64 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for SYMM update in host-side code.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/mma.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a SYMM update among matrices (tensors of rank=2) pointed to by TensorRef objects:
/// D = alpha * A * B + beta * C for SideMode::kLeft, or D = alpha * B * A + beta * C for
/// SideMode::kRight, where A is a symmetric matrix stored in the triangle selected by FillModeA.
template <
typename ElementA,
typename LayoutA,
SideMode SideModeA,
FillMode FillModeA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_symm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2,
"Tensors must be of rank 2");
static_assert(SideModeA != SideMode::kInvalid
, "Side Mode can either be Left or Right.");
static_assert(
FillModeA == FillMode::kLower ||
FillModeA == FillMode::kUpper,
"Fill Mode can either be Lower or Upper.");
using CompareOp_w_diag = typename TrMatrixCompareOp<FillModeA, DiagType::kNonUnit>::Type;
using CompareOp_wo_diag = typename TrMatrixCompareOp<FillModeA, DiagType::kZero>::Type;
// Note: batch is ignored.
int const M = problem_size.m();
int const N = problem_size.n();
// Assuming correct k-dimension value is passed
int const K = problem_size.k();
// Blocking necessary to speedup reference implementation
int const Mblock = 16;
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
CompareOp_w_diag compare_op_1;
CompareOp_wo_diag compare_op_2;
for (int row_block = 0; row_block < M; row_block += Mblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Mblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N) {
ElementA a_1 = ElementA();
ElementB b_1 = ElementB();
ElementA a_2 = ElementA();
ElementB b_2 = ElementB();
// A x B or B x A (with diagonal)
if (SideModeA == SideMode::kLeft) {
a_1 = (compare_op_1(row, k_block)) ?
(tensor_a.at(MatrixCoord(row, k_block))) : ElementA();
b_1 = tensor_b.at(MatrixCoord(k_block, col));
} else if (SideModeA == SideMode::kRight) {
a_1 = tensor_b.at(MatrixCoord(row, k_block));
b_1 = (compare_op_1(k_block, col)) ?
tensor_a.at(MatrixCoord(k_block, col)) : ElementA();
}
ComputeType compute_a_1(cast_if_scalar<ComputeType>(a_1));
ComputeType compute_b_1(cast_if_scalar<ComputeType>(b_1));
accum[i][j] = inner_product_op(compute_a_1, compute_b_1, accum[i][j]);
// A^T x B or B x A^T (without diagonal)
if (SideModeA == SideMode::kLeft) {
a_2 = (compare_op_2(k_block, row)) ?
(tensor_a.at(MatrixCoord(k_block, row))) : ElementA();
b_2 = tensor_b.at(MatrixCoord(k_block, col));
} else if (SideModeA == SideMode::kRight) {
a_2 = tensor_b.at(MatrixCoord(row, k_block));
b_2 = (compare_op_2(col, k_block)) ?
tensor_a.at(MatrixCoord(col, k_block)) : ElementA();
}
ComputeType compute_a_2(cast_if_scalar<ComputeType>(a_2));
ComputeType compute_b_2(cast_if_scalar<ComputeType>(b_2));
accum[i][j] = inner_product_op(compute_a_2, compute_b_2, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]) +
beta * ScalarType(tensor_c.at(coord)));
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general Symm update (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
SideMode SideModeA,
FillMode FillModeA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_symm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum) {
compute_symm<ElementA, LayoutA, SideModeA, FillModeA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, InnerProductOp, ConvertOp>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c,
initial_accum);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
SideMode SideModeA,
FillMode FillModeA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = cutlass::arch::OpMultiplyAdd
>
struct Symm;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA,
SideMode SideModeA, FillMode FillModeA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Symm<ElementA, LayoutA, SideModeA, FillModeA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAdd> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_symm<ElementA, LayoutA, SideModeA, FillModeA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_symm<ElementA, LayoutA, SideModeA, FillModeA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/host/symm.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/symm.h",
"repo_id": "tools",
"token_count": 4181
} | 65 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates several CUTLASS utilities in the context of a mixed-precision
floating-point matrix product computation.
These utilities are intended to be useful supporting components for managing tensor and matrix
memory allocations, initializing and comparing results, and computing reference output.
  CUTLASS utilities are defined in the directory `tools/util`, and definitions appear in the
  namespace `cutlass::` or an inner namespace therein. Operations in `cutlass::reference::` have
both host-side and device-side implementations, and the choice to use device-side initialization
and host-side verification in this example was arbitrary.
cutlass::half_t
This is a numeric type implementing IEEE half-precision quantities. It is functional in host
    and device code. In host-side code, CUTLASS_ENABLE_F16C optionally enables hardware-accelerated
    numeric conversion on x86-64 CPUs that support F16C extensions. In device code, all available
hardware is used to implement conversion and numeric operations.
cutlass::HostTensor<>
This template class simplifies the creation of tensors for all supported layouts. It simplifies
allocation and management of host- and device- memory allocations.
This class offers methods device_view() and host_view() to provide TensorView objects for
device- and host-side memory allocations.
cutlass::reference::device::TensorFillRandomGaussian()
    This template function initializes elements of a tensor to a random Gaussian distribution. It
uses cuRAND in device code to compute random numbers.
cutlass::reference::host::Gemm<>
This template function computes the general matrix product. This template supports unique
data types for each matrix operand, the internal accumulation type, and the scalar parameters
alpha and beta.
cutlass::reference::host::TensorEquals()
Compares two tensors of identical rank and returns true if values are bit equivalent.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
#include <fstream>
// CUTLASS includes needed for half-precision GEMM kernel
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/device/gemm.h"
//
// CUTLASS utility includes
//
// Defines operator<<() to write TensorView objects to std::ostream
#include "cutlass/util/tensor_view_io.h"
// Defines cutlass::HostTensor<>
#include "cutlass/util/host_tensor.h"
// Defines cutlass::half_t
#include "cutlass/numeric_types.h"
// Defines device_memory::copy_device_to_device()
#include "cutlass/util/device_memory.h"
// Defines cutlass::reference::device::TensorFillRandomGaussian()
#include "cutlass/util/reference/device/tensor_fill.h"
// Defines cutlass::reference::host::TensorEquals()
#include "cutlass/util/reference/host/tensor_compare.h"
// Defines cutlass::reference::host::Gemm()
#include "cutlass/util/reference/host/gemm.h"
#pragma warning( disable : 4503)
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS GEMM template and launch a GEMM kernel.
cudaError_t cutlass_hgemm_nn(
int M,
int N,
int K,
cutlass::half_t alpha,
cutlass::half_t const *A,
cutlass::layout::ColumnMajor::Stride::Index lda,
cutlass::half_t const *B,
cutlass::layout::ColumnMajor::Stride::Index ldb,
cutlass::half_t beta,
cutlass::half_t *C,
cutlass::layout::ColumnMajor::Stride::Index ldc) {
// Define the GEMM operation
using Gemm = cutlass::gemm::device::Gemm<
cutlass::half_t, // ElementA
cutlass::layout::ColumnMajor, // LayoutA
cutlass::half_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
cutlass::half_t, // ElementOutput
cutlass::layout::ColumnMajor // LayoutOutput
>;
Gemm gemm_op;
cutlass::Status status = gemm_op({
{M, N, K},
{A, lda},
{B, ldb},
{C, ldc},
{C, ldc},
{alpha, beta}
});
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
return cudaSuccess;
}
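// Illustrative sketch (not part of the original example): one way to invoke the
// cutlass_hgemm_nn() wrapper above directly with raw device allocations. The function
// name run_hgemm_example and the 64x64x64 problem size are assumptions chosen only for
// illustration; TestCutlassGemm() below remains the canonical path in this example.
cudaError_t run_hgemm_example() {
  int M = 64, N = 64, K = 64;
  // For column-major operands, the leading dimension is the number of rows.
  cutlass::device_memory::allocation<cutlass::half_t> A(size_t(M) * K);
  cutlass::device_memory::allocation<cutlass::half_t> B(size_t(K) * N);
  cutlass::device_memory::allocation<cutlass::half_t> C(size_t(M) * N);
  return cutlass_hgemm_nn(M, N, K, 1.0_hf, A.get(), M, B.get(), K, 0.0_hf, C.get(), M);
}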
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a single-precision
/// CUTLASS GEMM kernel.
cudaError_t TestCutlassGemm(int M, int N, int K, cutlass::half_t alpha, cutlass::half_t beta) {
cudaError_t result;
//
// Construct cutlass::HostTensor<> using the half-precision host-side type.
//
// cutlass::HostTensor<> allocates memory on both the host and device corresponding to rank=2
// tensors in column-major layout. Explicit synchronization methods are offered to copy the
// tensor to the device or to the host.
//
// M-by-K matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> A(cutlass::MatrixCoord(M, K));
// K-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> B(cutlass::MatrixCoord(K, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_cutlass(cutlass::MatrixCoord(M, N));
// M-by-N matrix of cutlass::half_t
cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> C_reference(cutlass::MatrixCoord(M, N));
//
// Initialize matrices with small, random integers.
//
// Arbitrary RNG seed value. Hard-coded for deterministic results.
uint64_t seed = 2080;
// Gaussian random distribution
cutlass::half_t mean = 0.0_hf;
cutlass::half_t stddev = 5.0_hf;
  // Specify the number of bits to the right of the binary point that are permitted
// to be non-zero. A value of "0" here truncates random values to integers
int bits_less_than_one = 0;
cutlass::reference::device::TensorFillRandomGaussian(
A.device_view(),
seed,
mean,
stddev,
bits_less_than_one
);
cutlass::reference::device::TensorFillRandomGaussian(
B.device_view(),
seed * 2019,
mean,
stddev,
bits_less_than_one
);
cutlass::reference::device::TensorFillRandomGaussian(
C_cutlass.device_view(),
seed * 1993,
mean,
stddev,
bits_less_than_one
);
// Copy C_cutlass into C_reference so the GEMM is correct when beta != 0.
cutlass::device_memory::copy_device_to_device(
C_reference.device_data(),
C_cutlass.device_data(),
C_cutlass.capacity());
// Copy the device-side view into host memory
C_reference.sync_host();
//
// Launch the CUTLASS GEMM kernel
//
result = cutlass_hgemm_nn(
M,
N,
K,
alpha,
A.device_data(),
A.stride(0),
B.device_data(),
B.stride(0),
beta,
C_cutlass.device_data(),
C_cutlass.stride(0)
);
if (result != cudaSuccess) {
return result;
}
//
// Verify the result using a host-side reference
//
// A and B were initialized using device-side procedures. The intent of this example is to
// use the host-side reference GEMM, so we must perform a device-to-host copy.
A.sync_host();
B.sync_host();
// Copy CUTLASS's GEMM results into host memory.
C_cutlass.sync_host();
// Compute the reference result using the host-side GEMM reference implementation.
cutlass::reference::host::Gemm<
cutlass::half_t, // ElementA
cutlass::layout::ColumnMajor, // LayoutA
cutlass::half_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
cutlass::half_t, // ElementOutput
cutlass::layout::ColumnMajor, // LayoutOutput
cutlass::half_t,
cutlass::half_t
> gemm_ref;
gemm_ref(
{M, N, K}, // problem size (type: cutlass::gemm::GemmCoord)
alpha, // alpha (type: cutlass::half_t)
A.host_ref(), // A (type: TensorRef<half_t, ColumnMajor>)
B.host_ref(), // B (type: TensorRef<half_t, ColumnMajor>)
beta, // beta (type: cutlass::half_t)
C_reference.host_ref() // C (type: TensorRef<half_t, ColumnMajor>)
);
// Compare reference to computed results.
if (!cutlass::reference::host::TensorEquals(
C_reference.host_view(),
C_cutlass.host_view())) {
char const *filename = "errors_01_cutlass_utilities.csv";
std::cerr << "Error - CUTLASS GEMM kernel differs from reference. Wrote computed and reference results to '" << filename << "'" << std::endl;
//
// On error, print C_cutlass and C_reference to std::cerr.
//
// Note, these are matrices of half-precision elements stored in host memory as
// arrays of type cutlass::half_t.
//
std::ofstream file(filename);
// Result of CUTLASS GEMM kernel
file << "\n\nCUTLASS =\n" << C_cutlass.host_view() << std::endl;
// Result of reference computation
file << "\n\nReference =\n" << C_reference.host_view() << std::endl;
// Return error code.
return cudaErrorUnknown;
}
// Passed error check
return cudaSuccess;
}
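// Illustrative sketch (not part of the original example): the HostTensor<> host/device
// synchronization pattern described in the comment at the top of this file. The function
// name and the 4x4 extent are assumptions used only for illustration.
void host_tensor_sync_sketch() {
  cutlass::HostTensor<cutlass::half_t, cutlass::layout::ColumnMajor> tensor(cutlass::MatrixCoord(4, 4));
  // Write an element through the host-side view ...
  tensor.host_view().at(cutlass::MatrixCoord(0, 0)) = 2.0_hf;
  // ... copy host memory to the device allocation before launching kernels ...
  tensor.sync_device();
  // ... and copy device memory back after kernels have written through device_view().
  tensor.sync_host();
}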
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to cutlass_utilities example.
//
// usage:
//
// 01_cutlass_utilities <M> <N> <K> <alpha> <beta>
//
int main(int argc, const char *arg[]) {
//
  // This example uses half-precision and is only suitable for devices with compute capability 5.3 or greater.
//
cudaDeviceProp prop;
cudaError_t result = cudaGetDeviceProperties(&prop, 0);
if (result != cudaSuccess) {
std::cerr << "Failed to query device properties with error " << cudaGetErrorString(result) << std::endl;
return -1;
}
if (!(prop.major > 5 || (prop.major == 5 && prop.minor >= 3))) {
std::cerr << "This example uses half precision and is only suitable for devices with compute capability 5.3 or greater.\n";
std::cerr << "You are using a CUDA device with compute capability " << prop.major << "." << prop.minor << std::endl;
return -1;
}
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions: <M> <N> <K>
int problem[3] = { 128, 128, 128 };
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Linear scale factors in GEMM. Note, these are half-precision values stored as
// cutlass::half_t.
//
// Values outside the range of IEEE FP16 will overflow to infinity or underflow to zero.
//
cutlass::half_t scalars[2] = { 1.0_hf, 0.0_hf };
for (int i = 4; i < argc && i < 6; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 4]; // lexical cast to cutlass::half_t
}
//
// Run the CUTLASS GEMM test.
//
result = TestCutlassGemm(
problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| examples/01_cutlass_utilities/cutlass_utilities.cu/0 | {
"file_path": "examples/01_cutlass_utilities/cutlass_utilities.cu",
"repo_id": "examples",
"token_count": 4609
} | 0 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Example of running grouped back-to-back GEMMs when intermediate results are RF resident
*/
#include <iostream>
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/base_grouped.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include "device/b2b_gemm.h"
#include "kernel/default_b2b_gemm.h"
#include "threadblock/grouped_threadblock_swizzle.h"
#include "b2b_grouped_gemm_run.h"
#include "test_run.h"
////////////////////////////////////////////////////////////////////////////////
std::vector<cutlass::gemm::GemmCoord> gemm_f16_sm80_problem_sizes_0;
std::vector<cutlass::gemm::GemmCoord> gemm_f16_sm80_problem_sizes_1;
// Constraints:
// 1. Warp shape N must equal thread block shape N
// 2. Problem size N must equal thread block shape N
using ThreadblockShape0 = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape0 = cutlass::gemm::GemmShape<16, 64, 32>;
using ThreadblockShape1 = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape1 = cutlass::gemm::GemmShape<16, 128, 32>;
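// The first constraint above (warp shape N equals threadblock shape N) can be verified at
// compile time. These static_asserts are an illustrative addition, not part of the
// original example.
static_assert(WarpShape0::kN == ThreadblockShape0::kN,
              "Warp shape N of the first GEMM must equal its threadblock shape N");
static_assert(WarpShape1::kN == ThreadblockShape1::kN,
              "Warp shape N of the second GEMM must equal its threadblock shape N");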
// Command line options parsing
struct Options {
bool help;
bool error;
bool reference_check;
int alignment = 8;
std::vector<cutlass::gemm::GemmCoord> problem_sizes0;
std::vector<cutlass::gemm::GemmCoord> problem_sizes1;
int problem_count;
bool verbose;
//
// Methods
//
Options():
help(false),
error(false),
reference_check(true),
problem_count(15),
verbose(false)
{ }
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("problems", problem_count, 15);
cmd.get_cmd_line_argument("reference-check", reference_check, true);
cmd.get_cmd_line_argument("verbose", verbose, false);
randomize_problems(cmd);
}
void randomize_problems(cutlass::CommandLine &cmd) {
//
// For now, randomly choose the problem sizes.
//
int cmd_line_m = -1;
int cmd_line_k = -1;
cmd.get_cmd_line_argument("m", cmd_line_m);
cmd.get_cmd_line_argument("k", cmd_line_k);
problem_sizes0.reserve(problem_count);
problem_sizes1.reserve(problem_count);
for (int i = 0; i < problem_count; ++i) {
int m = cmd_line_m;
int k = cmd_line_k;
if (m < 1) {
m = alignment * ((rand() % 256) + 1);
}
if (k < 1) {
k = alignment * ((rand() % 256) + 1);
}
cutlass::gemm::GemmCoord problem0(m, ThreadblockShape0::kN, k);
cutlass::gemm::GemmCoord problem1(m, ThreadblockShape1::kN, ThreadblockShape0::kN);
problem_sizes0.push_back(problem0);
problem_sizes1.push_back(problem1);
}
if (verbose) {
print_problem_sizes();
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "13_fused_two_gemms_grouped_f16_sm80_rf\n\n"
<< " This example runs a grouped back-to-back GEMM kernel. A group of independent back-to-back GEMMs are\n"
<< " run in a single kernel. Each indivdual problem in the group is subject to the same constraints that non-grouped\n"
<< " back-to-back GEMMs are subject to.s"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --problems=<int> Number of individual GEMM problems (default: --problems=15)\n"
<< " --m=<int> Sets the M dimension of both GEMMs for all groups. Otherwise, it is selected randomly\n"
<< " --k=<int> Sets the K dimension of the first GEMM for all groups. Otherwise, it is selected randomly\n"
<< " --verbose=<bool> If true, prints problem sizes.\n";
out << "\n\nExamples:\n\n"
<< "# Runs a grouped B2b GEMM with 10 random problem sizes\n"
<< "$ ./examples/13_two_tensor_op_fusion/13_fused_two_gemms_grouped_f16_sm80_rf --groups=10\n\n";
return out;
}
void print_problem_sizes() {
std::cout << std::endl;
std::cout << "Executing " << problem_count << " independent back-to-back GEMMs in a group" << std::endl;
for (int i = 0; i < problem_count; ++i) {
cutlass::gemm::GemmCoord problem0 = problem_sizes0.at(i);
cutlass::gemm::GemmCoord problem1 = problem_sizes1.at(i);
std::cout << "Problem " << i
<< "\t\tGEMM0: " << problem0.m() << 'x' << problem0.n() << 'x' << problem0.k()
<< "\t\tGEMM1: " << problem1.m() << 'x' << problem1.n() << 'x' << problem1.k()
<< std::endl;
}
}
};
bool run_fused_grouped_gemm_f16_sm80_rf_res() {
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
ElementCompute alpha0 = ElementCompute(1);
//Fused kernel has built-in bias, setting beta=0
ElementCompute beta0 = ElementCompute(0);
ElementCompute alpha1 = ElementCompute(1);
ElementCompute beta1 = ElementCompute(1); //beta=1 for bias
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using EpilogueOutputOp0 =
cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput,
InstructionShape::kM * InstructionShape::kN / 32,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>;
using EpilogueOutputOp1 =
cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>;
using GroupedThreadblockSwizzle = cutlass::gemm::threadblock::B2bGemmGroupedThreadblockSwizzle<
ThreadblockShape0,
cutlass::layout::RowMajor // LayoutC
>;
const int kAlignment = 128 / cutlass::sizeof_bits<ElementOutput>::value;
const int kStages = 3;
using B2bGemmKernel = cutlass::gemm::kernel::DefaultB2bGemm<
cutlass::half_t,
cutlass::layout::RowMajor,
kAlignment,
cutlass::half_t,
cutlass::layout::ColumnMajor,
kAlignment,
cutlass::half_t,
cutlass::layout::RowMajor,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
GroupedThreadblockSwizzle,
kStages,
cutlass::arch::OpMultiplyAdd
>::B2bGemmKernel;
using B2bGemm = cutlass::gemm::device::BaseGrouped<B2bGemmKernel>;
B2bFusedGroupedGemmRun<B2bGemm> fusedGemm;
std::cout << "Running Fused back-to-back FP16 TN Grouped GEMMs with RF residency...\n";
bool passed = fusedGemm.run(gemm_f16_sm80_problem_sizes_0, gemm_f16_sm80_problem_sizes_1, alpha0, beta0, alpha1, beta1);
if(passed)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return passed;
}
int main(int argc, char const **args) {
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.error) {
std::cerr << "Aborting execution." << std::endl;
return -1;
}
gemm_f16_sm80_problem_sizes_0 = options.problem_sizes0;
gemm_f16_sm80_problem_sizes_1 = options.problem_sizes1;
std::vector<bool (*)()>funcs = {
&run_fused_grouped_gemm_f16_sm80_rf_res
};
return testRun(80, funcs, "grouped gemm f16 RF residency");
}
////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/fused_two_gemms_grouped_f16_sm80_rf.cu/0 | {
"file_path": "examples/13_two_tensor_op_fusion/fused_two_gemms_grouped_f16_sm80_rf.cu",
"repo_id": "examples",
"token_count": 4015
} | 1 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines device-side elementwise operations on TensorView. Note, the operations defined
in this header are not specialized for any particular data layout and are therefore not
intended to offer the best possible performance. Rather, they are intended to be generic
reference implementations to support the CUTLASS unit tests.
*/
#pragma once
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reference {
namespace device {
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
template <
typename TensorRefIn, ///< Input TensorRef Type
typename TensorRefOut, ///< Output TensorRef Type
typename ScalarType, ///< alpha Type
typename TensorRefScalar, ///< Scale/Bias TensorRef Type
typename OutputTile,
typename ConvertOp = NumericConverter<typename TensorRefOut::Element, ScalarType>
>
__global__ void TensorScaleBiasGemm(
gemm::GemmCoord problem_size,
TensorRefIn tensor_in, ///< input tensor
TensorRefOut tensor_out, ///< output tensor
ScalarType alpha, ///< alpha
TensorRefScalar tensor_scale, ///< scale tensor
TensorRefScalar tensor_bias ///< bias tensor
) {
ConvertOp convert_op;
MatrixCoord output_coord(
MatrixCoord::Index((threadIdx.x + blockIdx.x * blockDim.x) * OutputTile::kRow),
MatrixCoord::Index((threadIdx.y + blockIdx.y * blockDim.y) * OutputTile::kColumn)
);
// Update the output tensor
for (int j = 0; j < OutputTile::kRow; ++j) {
for (int i = 0; i < OutputTile::kColumn; ++i) {
MatrixCoord coord = output_coord + MatrixCoord(i, j);
if (coord.row() < problem_size.m() && coord.column() < problem_size.n()) {
ScalarType scale = alpha;
if(tensor_scale.good())
scale = tensor_scale.at({0, coord.column()});
ScalarType bias = ScalarType(0);
if(tensor_bias.good())
bias = tensor_bias.at({0, coord.column()});
tensor_out.at(coord) = convert_op(
scale * ScalarType(tensor_in.at(coord)) + bias);
}
}
}
}
template <
typename TensorRefIn, ///< Input TensorRef Type
typename TensorRefOut, ///< Output TensorRef Type
typename ScalarType, ///< alpha Type
typename TensorRefScalar, ///< Scale/Bias TensorRef Type
typename ConvertOp = NumericConverter<typename TensorRefOut::Element, ScalarType>,
int kMblock = 4,
int kNblock = 4
>
__global__ void TensorScaleBiasGemmBatched(
gemm::GemmCoord problem_size,
TensorRefIn tensor_in, ///< input tensor
TensorRefOut tensor_out, ///< output tensor
ScalarType alpha, ///< alpha
TensorRefScalar tensor_scale, ///< scale tensor
TensorRefScalar tensor_bias, ///< bias tensor
int batch_count = 1,
int64_t batch_stride_tensor_in = 0,
int64_t batch_stride_tensor_out = 0,
int64_t batch_stride_tensor_scale = 0,
int64_t batch_stride_tensor_bias = 0
) {
ConvertOp convert_op;
int row_block = (blockIdx.x * blockDim.x + threadIdx.x) * kMblock;
int col_block = (blockIdx.y * blockDim.y + threadIdx.y) * kNblock;
int batch_idx = blockIdx.z;
tensor_in.add_pointer_offset(batch_idx * batch_stride_tensor_in);
tensor_out.add_pointer_offset(batch_idx * batch_stride_tensor_out);
tensor_scale.add_pointer_offset(batch_idx * batch_stride_tensor_scale);
tensor_bias.add_pointer_offset(batch_idx * batch_stride_tensor_bias);
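  // Each z-block handles batch indices batch_idx, batch_idx + gridDim.z, ...; the host-side
  // launch keeps grid.z under the CUDA limit, so this loop strides over any remaining batches.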
for (; batch_idx < batch_count; batch_idx += gridDim.z) {
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kNblock; j++) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kMblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (coord.row() < problem_size.m() && coord.column() < problem_size.n()) {
ScalarType scale = alpha;
if(tensor_scale.good())
scale = tensor_scale.at({0, coord.column()});
ScalarType bias = ScalarType(0);
if(tensor_bias.good())
bias = tensor_bias.at({0, coord.column()});
tensor_out.at(coord) = convert_op(
scale * ScalarType(tensor_in.at(coord)) + bias);
}
}
}
tensor_in.add_pointer_offset(batch_stride_tensor_in * gridDim.z);
tensor_out.add_pointer_offset(batch_stride_tensor_out * gridDim.z);
tensor_scale.add_pointer_offset(batch_stride_tensor_scale * gridDim.z);
tensor_bias.add_pointer_offset(batch_stride_tensor_bias * gridDim.z);
}
}
template <
typename TensorRefIn, ///< Input TensorRef Type
typename TensorRefOut, ///< Output TensorRef Type
typename ScalarType, ///< alpha Type
typename TensorRefScalar, ///< Scale/Bias TensorRef Type
typename ConvertOp = NumericConverter<typename TensorRefOut::Element, ScalarType>,
int kThreadM = 4, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 16, // shape of a threadblock in units of threads
int kCtaShapeN = 8 // shape of a threadblock in units of threads
>
__global__ void TensorScaleBiasConv2d(
conv::Conv2dProblemSize problem_size,
TensorRefIn tensor_in, ///< input tensor
TensorRefOut tensor_out, ///< output tensor
ScalarType alpha, ///< alpha
TensorRefScalar tensor_scale, ///< scale tensor
TensorRefScalar tensor_bias ///< bias tensor
) {
ConvertOp convert_op;
int64_t npq_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int k_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_n[kThreadM];
int thread_p[kThreadM];
int thread_q[kThreadM];
// Compute N, P, Q coordinates for each row of a thread's tile
int64_t PQ = int64_t(problem_size.P) * problem_size.Q;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int64_t npq = npq_start + m;
thread_n[m] = int(npq / PQ);
int64_t residual = npq % PQ;
thread_p[m] = int(residual / problem_size.Q);
thread_q[m] = int(residual % problem_size.Q);
}
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
if (thread_n[m] < problem_size.N && thread_p[m] < problem_size.P && thread_q[m] < problem_size.Q) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_k = k_start + n;
if (thread_k < problem_size.K) {
ScalarType scale = alpha;
if(tensor_scale.good())
scale = tensor_scale.at({0, thread_k});
ScalarType bias = ScalarType(0);
if(tensor_bias.good())
bias = tensor_bias.at({0, thread_k});
tensor_out.at({thread_n[m], thread_p[m], thread_q[m], thread_k}) = convert_op(
scale * ScalarType(
tensor_in.at({thread_n[m], thread_p[m], thread_q[m], thread_k})
) + bias);
}
}
}
}
}
}
/// Apply scale and bias on a tensor
template <
typename ElementIn, ///< Input Type
typename ElementOut, ///< Output Type
typename Layout, ///< Layout of input/output tensor
typename ScalarType, ///< alpha Type
typename LayoutScaleBias, ///< Layout of scale and bias
typename ConvertOp = NumericConverter<ElementOut, ScalarType>
>
void TensorScaleBiasGemm(
gemm::GemmCoord problem_size,
TensorRef<ElementIn, Layout> tensor_in, ///< input tensor
TensorRef<ElementOut, Layout> tensor_out, ///< output tensor
ScalarType alpha, ///< alpha
TensorRef<ScalarType, LayoutScaleBias> tensor_scale, ///< scale tensor
TensorRef<ScalarType, LayoutScaleBias> tensor_bias ///< bias tensor
) {
using OutputTile = MatrixShape<4, 4>;
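  // Each thread produces one OutputTile (4x4) of the output, so the grid below covers
  // ceil(M / (block.x * kRow)) x ceil(N / (block.y * kColumn)) thread blocks.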
dim3 block(16, 8);
dim3 grid(
(problem_size.m() + block.x * OutputTile::kRow - 1) / (block.x * OutputTile::kRow),
(problem_size.n() + block.y * OutputTile::kColumn - 1) / (block.y * OutputTile::kColumn)
);
kernel::TensorScaleBiasGemm<
TensorRef<ElementIn, Layout>,
TensorRef<ElementOut, Layout>,
ScalarType,
TensorRef<ScalarType, LayoutScaleBias>,
OutputTile,
ConvertOp
><<< grid, block >>> (
problem_size,
tensor_in,
tensor_out,
alpha,
tensor_scale,
tensor_bias
);
}
/// Apply scale and bias on a tensor
template <
typename ElementIn, ///< Input Type
typename ElementOut, ///< Output Type
typename Layout, ///< Layout of input/output tensor
typename ScalarType, ///< alpha Type
typename LayoutScaleBias, ///< Layout of scale and bias
typename ConvertOp = NumericConverter<ElementOut, ScalarType>
>
void TensorScaleBiasGemmBatched(
gemm::GemmCoord problem_size,
TensorRef<ElementIn, Layout> tensor_in, ///< input tensor
TensorRef<ElementOut, Layout> tensor_out, ///< output tensor
ScalarType alpha, ///< alpha
TensorRef<ScalarType, LayoutScaleBias> tensor_scale, ///< scale tensor
TensorRef<ScalarType, LayoutScaleBias> tensor_bias, ///< bias tensor
int batch_count = 1,
int64_t batch_stride_tensor_in = 0,
int64_t batch_stride_tensor_out = 0,
int64_t batch_stride_tensor_scale = 0,
int64_t batch_stride_tensor_bias = 0
) {
int const kMblock = 4;
int const kNblock = 4;
dim3 block(16, 8);
dim3 grid(
(problem_size.m() + block.x * kMblock - 1) / (block.x * kMblock),
(problem_size.n() + block.y * kNblock - 1) / (block.y * kNblock),
batch_count % std::numeric_limits<uint16_t>::max()
);
kernel::TensorScaleBiasGemmBatched<
TensorRef<ElementIn, Layout>,
TensorRef<ElementOut, Layout>,
ScalarType,
TensorRef<ScalarType, LayoutScaleBias>,
ConvertOp,
kMblock,
kNblock
><<< grid, block >>> (
problem_size,
tensor_in,
tensor_out,
alpha,
tensor_scale,
tensor_bias,
batch_count,
batch_stride_tensor_in,
batch_stride_tensor_out,
batch_stride_tensor_scale,
batch_stride_tensor_bias
);
}
/// Apply scale and bias on a tensor
template <
typename ElementIn, ///< Input Type
typename ElementOut, ///< Output Type
typename Layout, ///< Layout of input/output tensor
typename ScalarType, ///< alpha Type
typename LayoutScaleBias, ///< Layout of scale and bias
typename ConvertOp = NumericConverter<ElementOut, ScalarType>
>
void TensorScaleBiasConv2d(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementIn, Layout> tensor_in, ///< input tensor
TensorRef<ElementOut, Layout> tensor_out, ///< output tensor
ScalarType alpha, ///< alpha
TensorRef<ScalarType, LayoutScaleBias> tensor_scale, ///< scale tensor
TensorRef<ScalarType, LayoutScaleBias> tensor_bias ///< bias tensor
) {
int const kThreadM = 4; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 16; // shape of a threadblock in units of threads
int const kCtaShapeN = 8; // shape of a threadblock in units of threads
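  // The NHWC output is traversed as an (N * P * Q) x K matrix: each thread covers a
  // kThreadM x kThreadN tile of that matrix, and the launch grid below is sized accordingly.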
int64_t npq = int64_t(problem_size.N) * problem_size.P * problem_size.Q;
int64_t blocks_m = (npq + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid(uint32_t(blocks_m), (problem_size.K + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN));
kernel::TensorScaleBiasConv2d<
TensorRef<ElementIn, Layout>,
TensorRef<ElementOut, Layout>,
ScalarType,
TensorRef<ScalarType, LayoutScaleBias>,
ConvertOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block >>> (
problem_size,
tensor_in,
tensor_out,
alpha,
tensor_scale,
tensor_bias
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
| examples/13_two_tensor_op_fusion/reference/device/tensor_scale_bias.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/reference/device/tensor_scale_bias.h",
"repo_id": "examples",
"token_count": 6022
} | 2 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
Please check examples 07 and 08 for the basics of tensor op gemm kernels.  On NVIDIA Ampere
architecture, most concepts still hold.  The two main differences are
1. NVIDIA Ampere architecture introduces a new series of tensor core instructions (see
   include/cutlass/arch/mma_sm80.h) which are more efficient on Ampere.
2. NVIDIA Ampere architecture uses cp_async() to build a multistage software pipeline to better hide
latency (see include/cutlass/gemm/threadblock/mma_multistage.h)
Moreover, NVIDIA Ampere architecture starts supporting tfloat32 (see include/cutlass/tfloat32.h)
data types in tensor cores. One big advantage is that we can load in fp32 data and convert them
implicitly to tf32 inside the GEMM kernel which means no change is needed to accelerate traditional
fp32 data by using NVIDIA Ampere architecture.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
float alpha;
float beta;
bool reference_check;
int iterations;
Options():
help(false),
problem_size({5120, 4096, 4096}),
batch_count(1),
reference_check(true),
iterations(20),
alpha(1),
beta() { }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "14_ampere_tf32_tensorop_gemm example\n\n"
<< " This example uses the CUTLASS Library to execute TF32 tensorop GEMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/14_ampere_tf32_tensorop_gemm/14_ampere_tf32_tensorop_gemm --m=1024 --n=512 --k=1024 \\\n"
<< " --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product() * batch_count;
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = float; // <- data type of elements in input matrix A
using ElementInputB = float; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices: Row Major for
// Matrix A, Column Major for Matrix B, and Row Major for Matrices C and D
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 16>; // <- threadblock tile M = 128, N = 128, K = 16
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 16>; // <- warp tile M = 64, N = 64, K = 16
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8
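// Illustrative compile-time sanity checks (added for clarity; not part of the original
// example): the threadblock tile must be divisible by the warp tile, and the warp tile
// by the MMA instruction shape.
static_assert(ShapeMMAThreadBlock::kM % ShapeMMAWarp::kM == 0 &&
              ShapeMMAThreadBlock::kN % ShapeMMAWarp::kN == 0 &&
              ShapeMMAThreadBlock::kK % ShapeMMAWarp::kK == 0,
              "Threadblock tile must be divisible by the warp tile");
static_assert(ShapeMMAWarp::kM % ShapeMMAOp::kM == 0 &&
              ShapeMMAWarp::kN % ShapeMMAOp::kN == 0 &&
              ShapeMMAWarp::kK % ShapeMMAOp::kK == 0,
              "Warp tile must be divisible by the MMA instruction shape");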
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 4;
using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
int run(Options &options) {
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size = options.problem_size;
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(options.alpha);
ElementComputeEpilogue beta = ElementComputeEpilogue(options.beta);
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Result structure
Result result;
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
}
// Record an event at the start of a series of GEMMs
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
}
//
// Stop profiling loop
//
// Record an event when the GEMMs are complete
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
// Create instantiation for device reference gemm kernel
cutlass::reference::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue>
gemm_device;
// Launch device reference gemm kernel
gemm_device(problem_size,
alpha,
tensor_a.device_ref(),
tensor_b.device_ref(),
beta,
tensor_c.device_ref(),
tensor_ref_d.device_ref());
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (passed) {
std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " GFLOPs: " << result.gflops << std::endl;
}
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
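// Illustrative sketch (not part of the original example): the implicit fp32 -> tf32
// conversion mentioned in the comment at the top of this file can also be performed
// explicitly with cutlass::tfloat32_t (defined in include/cutlass/tfloat32.h, assumed
// here to be reachable through the CUTLASS headers included above). The function name
// is an assumption used only for illustration.
float round_through_tf32(float x) {
  cutlass::tfloat32_t t = cutlass::tfloat32_t(x);   // keeps fp32's 8 exponent bits, 10 mantissa bits
  return float(t);                                  // widen back to fp32 for inspection
}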
int main(int argc, const char **argv) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
    // Returning zero so this test passes on older Toolkits. Its actions are a no-op.
return 0;
}
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
printf("%d x %d x %d TF32 tensor op Matrix Multiply\n", \
options.problem_size.m(), options.problem_size.n(), options.problem_size.k());
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
return run(options);
}
| examples/14_ampere_tf32_tensorop_gemm/ampere_tf32_tensorop_gemm.cu/0 | {
"file_path": "examples/14_ampere_tf32_tensorop_gemm/ampere_tf32_tensorop_gemm.cu",
"repo_id": "examples",
"token_count": 6936
} | 3 |
################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import sys
print("This example is deprecated. Please see examples/python for examples of using "
"the CUTLASS Python interface.")
sys.exit(0)
import numpy as np
import cutlass.backend as pycutlass
from cutlass.backend import *
from cutlass.backend.utils.device import device_cc
import cutlass_bindings
from bfloat16 import bfloat16
import argparse
# parse the arguments
parser = argparse.ArgumentParser(description="Launch CUTLASS GEMM kernels from Python: 'D = alpha * A * B + beta * C'")
# Operation description
# math instruction description
parser.add_argument("-i", "--instruction_shape",
default=[1, 1, 1], nargs=3, type=int,
help="This option describes the size of MMA op")
parser.add_argument("-ta", "--element_a", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor A')
parser.add_argument("-tb", "--element_b", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor B')
parser.add_argument("-tc", "--element_c", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor C and output tensor D')
parser.add_argument("-tacc", "--element_acc", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of accumulator')
parser.add_argument('-m', "--math", default="multiply_add",
type=str, choices=["multiply_add", "multiply_add_fast_bf16", "multiply_add_fast_f32"], help="math instruction")
parser.add_argument('-op', "--opcode", default="Simt", type=str,
choices=["Simt", 'TensorOp'],
help="This option describes whether you want to use tensor \
cores (TensorOp) or regular SIMT cores (Simt) on GPU SM")
# tile description
parser.add_argument("-b", "--threadblock_shape",
default=[128, 128, 8], nargs=3, type=int,
help="This option describes the tile size a thread block with compute")
parser.add_argument("-s", "--stages", default=4,
type=int, help="Number of pipelines you want to use")
parser.add_argument("-w", "--warp_count", default=[4, 2, 1], nargs=3, type=int,
help="This option describes the number of warps along M, N, and K of the threadblock")
parser.add_argument("-cc", "--compute_capability", default=80,
type=int, help="This option describes CUDA SM architecture number")
# A
parser.add_argument('-la', "--layout_a", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor A")
parser.add_argument('-aa', '--alignment_a', default=1,
type=int, help="Memory alignement of input tensor A")
# B
parser.add_argument('-lb', "--layout_b", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor B")
parser.add_argument('-ab', '--alignment_b', default=1,
type=int, help="Memory alignment of input tensor B")
# C
parser.add_argument('-lc', "--layout_c", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor C and output tensor D")
parser.add_argument('-ac', '--alignment_c', default=1,
type=int, help="Memory alignment of input tensor C and output tensor D")
# epilogue
parser.add_argument("-te", "--element_epilogue", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16'], help='Epilogue datatype')
parser.add_argument("-ep", "--epilogue_functor", default="LinearCombination",
type=str, choices=['LinearCombination', 'FastLinearCombinationClamp', 'LinearCombinationClamp'],
help="This option describes the epilogue part of the kernel")
# swizzling
parser.add_argument("-sw", "--swizzling_functor", default="IdentitySwizzle1", type=str, choices=[
"IdentitySwizzle1", "IdentitySwizzle2", "IdentitySwizzle4", "IdentitySwizzle8", "HorizontalSwizzle", "BatchedIdentitySwizzle"],
help="This option describes how thread blocks are scheduled on GPU")
# Argument
parser.add_argument("-p", "--problem_size",
default=[128, 128, 128], nargs=3, type=int,
help="GEMM problem size M, N, K")
parser.add_argument("-alpha", "--alpha", default=1.0, type=float,
help="Scaling factor of A * B")
parser.add_argument("-beta", "--beta", default=0.0, type=float,
help="Scaling factor of C")
parser.add_argument("-gm", "--gemm_mode", default="Gemm", type=str,
choices=["Gemm", "GemmSplitKParallel", "Batched", "Array"],
help="GEMM mode. Gemm is used for non-splitK or serial-splitK. \
GemmSplitKParallel is used for parallel splitK")
parser.add_argument('-k', '--split_k_slices', default=1,
type=int, help="Number of split-k partitions. (default 1)")
parser.add_argument('-bias', '--bias', action='store_true', help="C is bias vector")
parser.add_argument('-batch', '--batch', default=1, type=int, help="batch size for batched GEMM")
# Activation function
parser.add_argument("-activ", "--activation_function", default="identity",
choices=["identity", "relu", "leaky_relu", "tanh", "sigmoid", "silu", "hardswish", "gelu"], help="activation function")
parser.add_argument("-activ_arg", "--activation_args", default=[], nargs="+", type=float,
help="addition arguments for activation")
parser.add_argument('--print_cuda', action="store_true",
help="print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
cc = device_cc()
if args.compute_capability != cc:
    raise Exception(("Parameter --compute_capability of {} "
                     "does not match the compute capability {} of the active device.").format(args.compute_capability, cc))
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
pycutlass.compiler.nvcc()
np.random.seed(0)
element_a = getattr(cutlass_bindings, args.element_a)
element_b = getattr(cutlass_bindings, args.element_b)
element_c = getattr(cutlass_bindings, args.element_c)
element_acc = getattr(cutlass_bindings, args.element_acc)
math_operation = getattr(MathOperation, args.math)
opclass = getattr(cutlass_bindings.OpClass, args.opcode)
math_inst = MathInstruction(
args.instruction_shape, element_a, element_b,
element_acc, opclass, math_operation
)
tile_description = TileDescription(
args.threadblock_shape, args.stages, args.warp_count,
math_inst
)
layout_a = getattr(cutlass_bindings, args.layout_a)
layout_b = getattr(cutlass_bindings, args.layout_b)
layout_c = getattr(cutlass_bindings, args.layout_c)
A = TensorDescription(
element_a, layout_a, args.alignment_a
)
B = TensorDescription(
element_b, layout_b, args.alignment_b
)
C = TensorDescription(
element_c, layout_c, args.alignment_c
)
element_epilogue = getattr(cutlass_bindings, args.element_epilogue)
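# Use the plain epilogue functor when no activation has to be applied by the GEMM
# itself: either the activation is the identity, or (for parallel split-K) the
# partial products are combined by the reduction kernel below, which applies the
# activation in its own epilogue.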
if (args.activation_function == "identity"
or (args.gemm_mode == "GemmSplitKParallel" and args.split_k_slices > 1)):
#
epilogue_functor = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
swizzling_functor = getattr(cutlass_bindings, args.swizzling_functor)
operation = GemmOperationUniversal(
arch=args.compute_capability, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
if args.print_cuda:
print(operation.rt_module.emit())
operations = [operation, ]
if args.gemm_mode == "GemmSplitKParallel":
if (args.activation_function == "identity"):
epilogue_functor_reduction = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor_reduction = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
reduction_operation = ReductionOperation(
shape=cutlass_bindings.MatrixCoord(4, 32 * C.alignment),
C=C, element_accumulator=element_acc,
element_compute=element_epilogue,
epilogue_functor=epilogue_functor_reduction,
count=C.alignment
)
operations.append(reduction_operation)
pycutlass.compiler.add_module(operations)
# User-provided inputs
problem_size = cutlass_bindings.gemm.GemmCoord(
args.problem_size[0], args.problem_size[1], args.problem_size[2])
tensor_a_size = args.batch * problem_size.m() * problem_size.k()
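# For floating-point element types, draw uniform samples and round them up with
# np.ceil so the inputs are integer-valued, which keeps the device result close
# to the numpy reference; int8 inputs are drawn from a small range directly.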
if args.element_a != "int8":
if args.element_a == "bfloat16":
tensor_A = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_a_size,))
).astype(bfloat16)
else:
tensor_A = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_a_size,))
).astype(getattr(np, args.element_a))
else:
tensor_A = np.random.uniform(
low=-2, high=2,size=(tensor_a_size,)
).astype(getattr(np, args.element_a))
tensor_b_size = args.batch * problem_size.k() * problem_size.n()
if args.element_b != "int8":
if args.element_b == "bfloat16":
tensor_B = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_b_size,))
).astype(bfloat16)
else:
tensor_B = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_b_size,))
).astype(getattr(np, args.element_b))
else:
tensor_B = np.random.uniform(
low=-2, high=2, size=(tensor_b_size,)
).astype(getattr(np, args.element_b))
if args.element_c != "int8":
if args.bias:
if args.layout_c == "RowMajor":
tensor_c_size = args.batch * problem_size.n()
elif args.layout_c == "ColumnMajor":
tensor_c_size = args.batch * problem_size.m()
else:
raise ValueError(args.layout_c)
else:
tensor_c_size = args.batch * problem_size.m() * problem_size.n()
if args.element_c == "bfloat16":
tensor_C = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_c_size,))
).astype(bfloat16)
else:
tensor_C = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_c_size,))
).astype(getattr(np, args.element_c))
else:
tensor_C = np.random.uniform(
low=-2, high=2, size=(args.batch * problem_size.m() * problem_size.n(),)
).astype(getattr(np, args.element_c))
tensor_D = np.zeros(
shape=(args.batch * problem_size.m() * problem_size.n(),)
).astype(getattr(np, args.element_c))
output_op = operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args))
arguments = GemmArguments(
operation=operation, problem_size=problem_size,
A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
output_op=output_op,
gemm_mode=getattr(cutlass_bindings.gemm.Mode, args.gemm_mode),
split_k_slices=args.split_k_slices, batch=args.batch
)
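# For parallel split-K, the GEMM writes partial results to a workspace
# (arguments.ptr_D); the reduction below combines the partitions into tensor_D
# and applies alpha/beta (and the activation) in its epilogue.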
if args.gemm_mode == "GemmSplitKParallel":
reduction_arguments = ReductionArguments(
operation=reduction_operation,
problem_size=[problem_size.m(), problem_size.n()],
partitions=args.split_k_slices, workspace=arguments.ptr_D,
destination=tensor_D, source=tensor_C,
output_op=reduction_operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args)),
bias = arguments.bias
)
operation.run(arguments)
if args.gemm_mode == "GemmSplitKParallel":
reduction_operation.run(reduction_arguments)
reduction_arguments.sync()
else:
arguments.sync()
# run the host reference module
reference = ReferenceModule(A, B, C)
tensor_D_ref = reference.run(
tensor_A, tensor_B, tensor_C, problem_size, args.alpha, args.beta, args.bias, args.batch)
tensor_D_ref = getattr(pycutlass, args.activation_function).numpy(*([tensor_D_ref,] + args.activation_args))
try:
assert np.array_equal(tensor_D, tensor_D_ref)
except:
assert np.allclose(tensor_D, tensor_D_ref, atol=1e-5)
print("Passed.")
| examples/40_cutlass_py/customizable/gemm.py/0 | {
"file_path": "examples/40_cutlass_py/customizable/gemm.py",
"repo_id": "examples",
"token_count": 6052
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS Attention Example.
    This workload computes a fused multi-head attention that supports variable sequence lengths.
    Because it keeps the attention matrix in shared memory, it both runs faster and
    uses less global memory.
This is based on `"Self-Attention Does Not Need O(n^2) Memory" <http://arxiv.org/abs/2112.05682>`_,
and very similar to `"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness" <https://arxiv.org/abs/2205.14135>`_.
Algorithm:
In short, we can compute the output incrementally in blocks of size B,
we just need to divide the final result by the sum of all coefficients in
the softmax (which we compute incrementally) with the following pseudo-code:
```
s_prime = torch.zeros([num_queries, B])
O = torch.zeros([num_queries, head_size_v])
for i in range(0, K.shape[0], B):
si = exp((Q . K[i * B:(i+1) * B].t) * scale)
      s_prime += si.sum(-1)
O += si . V[i * B:(i+1) * B]
O = O / s_prime
```
In practice, and for numerical stability reasons,
    we also subtract the maximum so far (`mi`) before doing
the exponential. When we encounter new keys, the maximum
used to compute O so far (`m_prime`) can differ from the
current maximum, so we update O before accumulating with
```
O = O * exp(m_prime - mi)
m_prime = mi
```
Implementation details:
- `si` is stored in shared memory between the 2 back to back gemms
- we keep and accumulate the output
directly in registers if we can (`head_size_v <= 128`).
Otherwise, we store it & accumulate in global memory (slower)
- blocks are parallelized across the batch dimension, the number
of heads, and the query sequence size
Examples:
# Run an attention example with default setup
$ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_variable_seqlen
# Run an attention example with custom setup
$ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_variable_seqlen --head_number=2 --batch_size=3 --head_size=32 --head_size_v=64 --seq_length=512 --seq_length_kv=1024 --causal=true
Acknowledgement: Fixed-sequence-length FMHA code was upstreamed by Meta xFormers (https://github.com/facebookresearch/xformers).
Using grouped GEMM to handle variable sequence lengths is inspired by an idea originally prototyped by ByteDance Inc.
*/
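// For illustration only (this consolidated form is not part of the original
// description): combining the two snippets above into a single numerically
// stable loop over key blocks gives
//
//   m_prime = -inf; s_prime = 0; O = 0
//   for each key block j:
//     sij = (Q . K[j].t) * scale
//     mi = max(m_prime, sij.max(-1))
//     pij = exp(sij - mi)
//     O = O * exp(m_prime - mi) + pij . V[j]
//     s_prime = s_prime * exp(m_prime - mi) + pij.sum(-1)
//     m_prime = mi
//   O = O / s_prime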
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/device/gemm_grouped.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/device/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/fast_math.h"
#include "default_fmha_grouped.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
bool error;
bool reference_check;
bool use_mask;
bool causal;
bool fixed_seq_length;
std::vector<cutlass::gemm::GemmCoord> problem_sizes0;
std::vector<cutlass::gemm::GemmCoord> problem_sizes1;
std::vector<cutlass::gemm::GemmCoord> problem_sizes0_real;
std::vector<cutlass::gemm::GemmCoord> problem_sizes1_real;
int alignment;
int head_number;
int batch_size;
int head_size;
int head_size_v;
int seq_length;
int seq_length_kv;
int iterations;
int problem_count;
// alpha0, alpha1 and beta are fixed
// in this multi-head attention example
float alpha0;
float alpha1;
float beta;
cutlass::gemm::kernel::GroupScheduleMode scheduler_mode;
//
// Methods
//
Options():
help(false),
error(false),
alignment(1),
reference_check(true),
head_number(12),
batch_size(16),
head_size(64),
head_size_v(64),
seq_length(1024),
seq_length_kv(1024),
use_mask(false),
iterations(20),
causal(false),
fixed_seq_length(false),
problem_count(batch_size * head_number),
scheduler_mode(cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly)
{ }
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("alignment", alignment, 1);
cmd.get_cmd_line_argument("head_number", head_number, 12);
cmd.get_cmd_line_argument("batch_size", batch_size, 16);
cmd.get_cmd_line_argument("head_size", head_size, 64);
cmd.get_cmd_line_argument("head_size_v", head_size_v, head_size);
cmd.get_cmd_line_argument("seq_length", seq_length, 1024);
cmd.get_cmd_line_argument("seq_length_kv", seq_length_kv, seq_length);
cmd.get_cmd_line_argument("use_mask", use_mask, false);
cmd.get_cmd_line_argument("iterations", iterations, 20);
cmd.get_cmd_line_argument("reference-check", reference_check, true);
cmd.get_cmd_line_argument("causal", causal, true);
cmd.get_cmd_line_argument("fixed_seq_length", fixed_seq_length, false);
std::vector<std::string> scheduler_mode_strs;
cmd.get_cmd_line_arguments("scheduler-mode", scheduler_mode_strs);
if (!scheduler_mode_strs.empty()) {
if (scheduler_mode_strs.size() > 1) {
std::cerr << "Only one scheduler mode may be passed in" << std::endl;
error = true;
return;
}
std::string scheduler_mode_str = scheduler_mode_strs[0];
if (scheduler_mode_str == "kDeviceOnly") {
scheduler_mode = cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly;
} else if (scheduler_mode_str == "kHostPrecompute") {
scheduler_mode = cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute;
} else {
std::cerr << "Unrecognized scheduler mode '" << scheduler_mode_str << "'" << std::endl;
error = true;
return;
}
}
if (fixed_seq_length) {
std::cout << "NOTE: Better performance is expected for fixed-sized sequence length from 41_fused_multi_head_attention_fixed_seqlen." << std::endl;
}
randomize_problems();
}
void randomize_problems() {
problem_count = head_number * batch_size;
problem_sizes0.reserve(problem_count);
problem_sizes1.reserve(problem_count);
// When using mask, the original inputs are not padded
// and we need to save these info.
if (use_mask) {
problem_sizes0_real.reserve(problem_count);
problem_sizes1_real.reserve(problem_count);
}
for (int i = 0; i < batch_size; ++i) {
// problems belonging to the same batch share the same seq len
int m_real, mkv_real;
if (fixed_seq_length) {
m_real = seq_length;
mkv_real = seq_length_kv;
} else {
m_real = (rand() % seq_length) + 1;
// Only randomize seq_length_kv if it was set to a different value than
// seq_length originally.
if (seq_length != seq_length_kv) {
mkv_real = (rand() % seq_length_kv) + 1;
} else {
mkv_real = m_real;
}
}
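      // Round the (possibly randomized) sequence lengths up to the required alignment.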
int m = (m_real + alignment - 1) / alignment * alignment;
int mkv = (mkv_real + alignment - 1) / alignment * alignment;
int k0 = head_size;
int k1 = head_size_v;
for (int j = 0; j < head_number; ++j) {
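        // problem0 computes P = Q . K^T : (seq_q x seq_kv x head_size)
        // problem1 computes O = P . V   : (seq_q x head_size_v x seq_kv)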
cutlass::gemm::GemmCoord problem0(m, mkv, k0);
cutlass::gemm::GemmCoord problem1(m, k1, mkv);
problem_sizes0.push_back(problem0);
problem_sizes1.push_back(problem1);
if (use_mask) {
cutlass::gemm::GemmCoord problem0_real(m_real, mkv_real, k0);
cutlass::gemm::GemmCoord problem1_real(m_real, k1, mkv_real);
problem_sizes0_real.push_back(problem0_real);
problem_sizes1_real.push_back(problem1_real);
}
}
}
}
void print_problems() {
std::cout << " Running " << batch_size << " batches, each with " << head_number << " heads of size " << head_size << ":" << std::endl;
for (int i = 0; i < batch_size; ++i) {
int idx = i * head_number;
std::cout << " [" << i << "] seq_length = " << problem_sizes0[idx].m() << " seq_length_kv = " << problem_sizes0[idx].n() << std::endl;
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "41_fused_multi_head_attention_variable_seqlen\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --head_number=<int> Head number in multi-head attention (default: --head_number=12)\n"
<< " --batch_size=<int> Batch size in multi-head attention (default: --batch_size=16)\n"
<< " --head_size=<int> Head size in multi-head attention (default: --head_size=64)\n"
<< " --head_size_v=<int> Head size in multi-head attention for V (default: --head_size_v=head_size)\n"
<< " --seq_length=<int> Sequence length in multi-head attention for Q (default: --seq_length=1024)\n"
<< " --seq_length_kv=<int> Sequence length in multi-head attention for K/V (default: --seq_length_kv=seq_length)\n"
<< " --use_mask=<bool> If true, performs padding-like masking in softmax.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --reference-check=<bool> If true, performs reference check.\n"
<< " --causal=<bool> If true, uses causal masking.\n"
<< " --fixed_seq_length=<bool> If true, uses the same sequence length for each item in the batch.\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fops = int64_t();
for (size_t i = 0; i < problem_sizes0.size(); ++i) {
auto const& problem0 = problem_sizes0[i];
auto const& problem1 = problem_sizes1[i];
for (int row = 0; row < problem0.m(); ++row) {
int num_cols0 = problem0.n();
if (causal) {
num_cols0 = std::min(row + 1, num_cols0);
}
// P <- Q . K_t
fops += 2 * num_cols0 * problem0.k();
// P <- exp(P - max(P))
fops += 2 * num_cols0;
// S <- sum(P)
fops += num_cols0 - 1;
// O <- P . V
fops += 2 * num_cols0 * problem1.n();
// O <- O / S
fops += num_cols0 * problem1.n();
}
}
return double(fops) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Attention>
class TestbedAttention {
public:
//
// Type definitions
//
using scalar_t = typename Attention::GemmKernel::scalar_t;
using accum_t = typename Attention::GemmKernel::accum_t;
using output_t = typename Attention::GemmKernel::output_t;
using output_accum_t = typename Attention::GemmKernel::output_accum_t;
using ElementQ = scalar_t;
using ElementK = scalar_t;
using ElementP = accum_t;
using ElementAccumulator = accum_t;
using ElementV = scalar_t;
using ElementO = output_t;
using ElementOAccum = output_accum_t;
using ElementCompute = accum_t;
using ElementNorm = accum_t;
using ElementSum = accum_t;
using ElementSoftmaxCompute = accum_t;
using LayoutQ = cutlass::layout::RowMajor;
using LayoutK = cutlass::layout::ColumnMajor;
using LayoutP = cutlass::layout::RowMajor;
using LayoutV = cutlass::layout::RowMajor;
using LayoutO = cutlass::layout::RowMajor;
using MatrixCoord = typename LayoutP::TensorCoord;
static bool const kNeedsOutputAccumulatorBuffer = Attention::GemmKernel::kNeedsOutputAccumulatorBuffer;
private:
//
// Data members
//
Options & options;
/// Initialization
cutlass::Distribution::Kind init_Q;
cutlass::Distribution::Kind init_K;
cutlass::Distribution::Kind init_P;
cutlass::Distribution::Kind init_V;
cutlass::Distribution::Kind init_O;
uint32_t seed;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device1;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0_real;
std::vector<int64_t> offset_Q;
std::vector<int64_t> offset_K;
std::vector<int64_t> offset_P;
std::vector<int64_t> offset_V;
std::vector<int64_t> offset_O;
std::vector<int64_t> ldq_host;
std::vector<int64_t> ldk_host;
std::vector<int64_t> ldp_host;
std::vector<int64_t> ldv_host;
std::vector<int64_t> ldo_host;
std::vector<int64_t> seqlen_host;
cutlass::DeviceAllocation<int64_t> ldq;
cutlass::DeviceAllocation<int64_t> ldk;
cutlass::DeviceAllocation<int64_t> ldp;
cutlass::DeviceAllocation<int64_t> ldv;
cutlass::DeviceAllocation<int64_t> ldo;
cutlass::DeviceAllocation<int64_t> seqlen;
cutlass::DeviceAllocation<ElementQ> block_Q;
cutlass::DeviceAllocation<ElementK> block_K;
cutlass::DeviceAllocation<ElementP> block_P;
cutlass::DeviceAllocation<ElementV> block_V;
cutlass::DeviceAllocation<ElementO> block_O;
cutlass::DeviceAllocation<ElementOAccum> block_O_accumulate;
cutlass::DeviceAllocation<ElementNorm> block_Norm;
cutlass::DeviceAllocation<ElementSum> block_Sum;
cutlass::DeviceAllocation<int64_t> offset_P_Device;
cutlass::DeviceAllocation<ElementQ *> ptr_Q;
cutlass::DeviceAllocation<ElementK *> ptr_K;
cutlass::DeviceAllocation<ElementP *> ptr_P;
cutlass::DeviceAllocation<ElementV *> ptr_V;
cutlass::DeviceAllocation<ElementO *> ptr_O;
cutlass::DeviceAllocation<ElementOAccum *> ptr_O_accumulate;
public:
//
// Methods
//
TestbedAttention(
Options &options_,
cutlass::Distribution::Kind init_Q_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_K_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_P_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_V_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_O_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
):
options(options_), init_Q(init_Q_), init_K(init_K_), init_P(init_P_), init_V(init_V_), init_O(init_O_), seed(seed_) { }
int problem_count() const {
return (options.head_number * options.batch_size);
}
private:
/// Helper to initialize a tensor view
template <typename Element>
void initialize_tensor_(
Element *ptr,
size_t capacity,
cutlass::Distribution::Kind dist_kind,
uint32_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<ElementP>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 8;
scope_min = -8;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::device::BlockFillRandomUniform(
ptr, capacity, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::device::BlockFillRandomGaussian(
ptr, capacity, seed, Element(), Element(0.5f));
}
else if (dist_kind == cutlass::Distribution::Sequential) {
// Fill with increasing elements
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(1), Element());
}
else {
// Fill with all 1s
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(), Element(1));
}
}
/// Initializes data structures
void initialize_() {
//
    // Set scalars for the MHA example
//
options.alpha0 = 1.0f / sqrt(float(options.head_size));
options.alpha1 = 1.0f;
options.beta = 0;
//
// Choose random problem sizes
//
// construct a few problems of random sizes
srand(seed);
int64_t total_elements_Q = 0;
int64_t total_elements_K = 0;
int64_t total_elements_P = 0;
int64_t total_elements_V = 0;
int64_t total_elements_O = 0;
ldq_host.resize(problem_count());
ldk_host.resize(problem_count());
ldp_host.resize(problem_count());
ldv_host.resize(problem_count());
ldo_host.resize(problem_count());
seqlen_host.resize(problem_count());
for (int32_t i = 0; i < problem_count(); ++i) {
auto problem0 = options.problem_sizes0.at(i);
auto problem1 = options.problem_sizes1.at(i);
ldq_host.at(i) = LayoutQ::packed({problem0.m(), problem0.k()}).stride(0);
ldk_host.at(i) = LayoutK::packed({problem0.k(), problem0.n()}).stride(0);
ldp_host.at(i) = LayoutP::packed({problem0.m(), problem0.n()}).stride(0);
ldv_host.at(i) = LayoutV::packed({problem1.k(), problem1.n()}).stride(0);
ldo_host.at(i) = LayoutO::packed({problem1.m(), problem1.n()}).stride(0);
// m = n for attention problems.
seqlen_host.at(i) = problem0.m();
offset_Q.push_back(total_elements_Q);
offset_K.push_back(total_elements_K);
offset_P.push_back(total_elements_P);
offset_V.push_back(total_elements_V);
offset_O.push_back(total_elements_O);
int64_t elements_Q = problem0.m() * problem0.k();
int64_t elements_K = problem0.k() * problem0.n();
int64_t elements_P = problem0.m() * problem0.n();
int64_t elements_V = problem1.k() * problem1.n();
int64_t elements_O = problem1.m() * problem1.n();
total_elements_Q += elements_Q;
total_elements_K += elements_K;
total_elements_P += elements_P;
total_elements_V += elements_V;
total_elements_O += elements_O;
}
problem_sizes_device0.reset(problem_count());
problem_sizes_device1.reset(problem_count());
problem_sizes_device0.copy_from_host(options.problem_sizes0.data());
problem_sizes_device1.copy_from_host(options.problem_sizes1.data());
if (options.use_mask) {
problem_sizes_device0_real.reset(problem_count());
problem_sizes_device0_real.copy_from_host(options.problem_sizes0_real.data());
}
ldq.reset(problem_count());
ldk.reset(problem_count());
ldp.reset(problem_count());
ldv.reset(problem_count());
ldo.reset(problem_count());
seqlen.reset(problem_count());
ldq.copy_from_host(ldq_host.data());
ldk.copy_from_host(ldk_host.data());
ldp.copy_from_host(ldp_host.data());
ldv.copy_from_host(ldv_host.data());
ldo.copy_from_host(ldo_host.data());
seqlen.copy_from_host(seqlen_host.data());
//
// Assign pointers
//
block_Q.reset(total_elements_Q);
block_K.reset(total_elements_K);
block_P.reset(total_elements_P);
block_V.reset(total_elements_V);
block_O.reset(total_elements_O);
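    // A separate accumulator buffer in global memory is only needed when the
    // kernel cannot keep the output accumulators in registers (see the note on
    // head_size_v in the file header).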
if (kNeedsOutputAccumulatorBuffer) {
block_O_accumulate.reset(total_elements_O);
}
offset_P_Device.reset(problem_count());
// sync offset with device
cutlass::device_memory::copy_to_device(offset_P_Device.get(), offset_P.data(), offset_P.size());
std::vector<ElementQ *> ptr_Q_host(problem_count());
std::vector<ElementK *> ptr_K_host(problem_count());
std::vector<ElementP *> ptr_P_host(problem_count());
std::vector<ElementV *> ptr_V_host(problem_count());
std::vector<ElementO *> ptr_O_host(problem_count());
std::vector<ElementOAccum *> ptr_O_accumulate_host(problem_count());
std::vector<ElementNorm *> ptr_norm_host(problem_count());
std::vector<ElementSum *> ptr_sum_host(problem_count());
for (int32_t i = 0; i < problem_count(); ++i) {
ptr_Q_host.at(i) = block_Q.get() + offset_Q.at(i);
ptr_K_host.at(i) = block_K.get() + offset_K.at(i);
ptr_P_host.at(i) = block_P.get() + offset_P.at(i);
ptr_V_host.at(i) = block_V.get() + offset_V.at(i);
ptr_O_host.at(i) = block_O.get() + offset_O.at(i);
if (kNeedsOutputAccumulatorBuffer) {
ptr_O_accumulate_host.at(i) = block_O_accumulate.get() + offset_O.at(i);
}
}
ptr_Q.reset(problem_count());
ptr_Q.copy_from_host(ptr_Q_host.data());
ptr_K.reset(problem_count());
ptr_K.copy_from_host(ptr_K_host.data());
ptr_P.reset(problem_count());
ptr_P.copy_from_host(ptr_P_host.data());
ptr_V.reset(problem_count());
ptr_V.copy_from_host(ptr_V_host.data());
ptr_O.reset(problem_count());
ptr_O.copy_from_host(ptr_O_host.data());
if (kNeedsOutputAccumulatorBuffer) {
ptr_O_accumulate.reset(problem_count());
ptr_O_accumulate.copy_from_host(ptr_O_accumulate_host.data());
}
//
// Initialize the problems of the workspace
//
initialize_tensor_(block_Q.get(), total_elements_Q, init_Q, seed + 1);
initialize_tensor_(block_K.get(), total_elements_K, init_K, seed + 2);
initialize_tensor_(block_V.get(), total_elements_V, init_V, seed + 3);
}
template<typename Element>
  bool verify_tensor_(std::vector<Element> vector_Input,
std::vector<Element> vector_Input_Ref,
int64_t verify_length = -1) {
int64_t size = (vector_Input.size() < vector_Input_Ref.size()) ? vector_Input.size() : vector_Input_Ref.size();
size = (verify_length == -1) ? size : verify_length;
// 0.05 for absolute error
float abs_tol = 5e-2f;
// 10% for relative error
float rel_tol = 1e-1f;
for (int64_t i = 0; i < size; ++i) {
float diff = (float)(vector_Input.at(i) - vector_Input_Ref.at(i));
float abs_diff = fabs(diff);
float abs_ref = fabs((float)vector_Input_Ref.at(i) + 1e-5f);
float relative_diff = abs_diff / abs_ref;
if ( (isnan(abs_diff) || isinf(abs_diff)) || (abs_diff > abs_tol && relative_diff > rel_tol)) {
printf("[%d/%d] diff = %f, rel_diff = %f, {computed=%f, ref=%f}.\n", int(i), int(size), abs_diff, relative_diff, (float)(vector_Input.at(i)), (float)(vector_Input_Ref.at(i)));
return false;
}
}
return true;
}
  /// Verifies the result against a reference computation
bool verify_() {
bool passed = true;
for (int32_t i = 0; i < problem_count(); ++i) {
cutlass::gemm::GemmCoord problem0 = options.problem_sizes0.at(i);
cutlass::gemm::GemmCoord problem1 = options.problem_sizes1.at(i);
LayoutQ layout_Q(ldq_host.at(i));
LayoutK layout_K(ldk_host.at(i));
LayoutP layout_P(ldp_host.at(i));
LayoutV layout_V(ldv_host.at(i));
LayoutO layout_O(ldo_host.at(i));
MatrixCoord extent_Q{problem0.m(), problem0.k()};
MatrixCoord extent_K{problem0.k(), problem0.n()};
MatrixCoord extent_P{problem0.m(), problem0.n()};
MatrixCoord extent_V{problem1.k(), problem1.n()};
MatrixCoord extent_O{problem1.m(), problem1.n()};
cutlass::TensorView<ElementQ, LayoutQ> view_Q(block_Q.get() + offset_Q.at(i), layout_Q, extent_Q);
cutlass::TensorView<ElementK, LayoutK> view_K(block_K.get() + offset_K.at(i), layout_K, extent_K);
cutlass::TensorView<ElementP, LayoutP> view_P(block_P.get() + offset_P.at(i), layout_P, extent_P);
cutlass::TensorView<ElementV, LayoutV> view_V(block_V.get() + offset_V.at(i), layout_V, extent_V);
cutlass::DeviceAllocation<ElementP> block_Ref(layout_P.capacity(extent_P));
cutlass::TensorView<ElementP, LayoutP> view_Ref_device(block_Ref.get(), layout_P, extent_P);
cutlass::DeviceAllocation<ElementO> block_Ref_O(layout_O.capacity(extent_O));
cutlass::TensorView<ElementO, LayoutO> view_Ref_O_device(block_Ref_O.get(), layout_O, extent_O);
cutlass::reference::device::TensorFill(view_Ref_O_device, ElementO(0));
// Reference GEMM
cutlass::reference::device::GemmComplex<
ElementQ, LayoutQ,
ElementK, LayoutK,
ElementP, LayoutP,
ElementCompute, ElementAccumulator
>(
problem0,
ElementAccumulator(options.alpha0),
view_Q,
Attention::GemmKernel::MM0::Mma::kTransformA,
view_K,
Attention::GemmKernel::MM0::Mma::kTransformB,
ElementAccumulator(options.beta),
view_P,
view_Ref_device,
ElementAccumulator(0)
);
// Compute softmax for P. We need to explicitly compute softmax
// over P because softmax is fused to the second GEMM in the
// profiled implementation.
std::vector<ElementP> matrix_Ref(layout_P.capacity(extent_P));
cutlass::device_memory::copy_to_host(matrix_Ref.data(), block_Ref.get(), matrix_Ref.size());
cutlass::TensorView<ElementP, LayoutP> view_Ref_host(matrix_Ref.data(), layout_P, extent_P);
std::vector<ElementNorm> vector_Norm_Ref(problem0.m());
std::vector<ElementSum> vector_Sum_Ref(problem0.m());
int n_dim = options.use_mask ? options.problem_sizes0_real.at(i).n() : problem0.n();
// Compute softmax for reference matrix
for (int m = 0; m < problem0.m(); m++) {
int n_dim_row = n_dim;
if (options.causal) {
n_dim_row = std::min(m + 1, n_dim);
}
ElementSoftmaxCompute max = ElementSoftmaxCompute(view_Ref_host.ref().at({m, 0}));
for (int n = 1; n < n_dim_row; n++) {
max = std::max(max, ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})));
}
vector_Norm_Ref.at(m) = ElementNorm(max);
ElementSoftmaxCompute sum = ElementSoftmaxCompute();
for (int n = 0; n < n_dim_row; n++) {
sum += std::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max );
}
ElementSoftmaxCompute inv_sum = ElementSoftmaxCompute(1.0f / sum);
vector_Sum_Ref.at(m) = ElementSum(inv_sum);
for (int n = 0; n < n_dim_row; n++) {
view_Ref_host.ref().at({m, n}) = ElementP(
std::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max ) * inv_sum
);
}
// Mask out the rest of the attention matrix
for (int n = n_dim_row; n < n_dim; ++n) {
view_Ref_host.ref().at({m, n}) = ElementP(0);
}
}
// when not using mask, problem_real and problem share the same sizes
if (options.use_mask) {
for (int m = 0; m < problem0.m(); m++) {
for (int n = n_dim; n < problem0.n(); n++) {
view_Ref_host.ref().at({m, n}) = ElementP(0);
}
}
}
cutlass::device_memory::copy_to_device(block_P.get() + offset_P.at(i), matrix_Ref.data(), matrix_Ref.size());
// Reference GEMM
cutlass::reference::device::GemmComplex<
ElementP, LayoutP,
ElementV, LayoutV,
ElementO, LayoutO,
ElementCompute, ElementAccumulator
>(
problem1,
ElementAccumulator(options.alpha1),
view_P,
Attention::GemmKernel::MM0::Mma::kTransformA,
view_V,
Attention::GemmKernel::MM0::Mma::kTransformB,
ElementAccumulator(options.beta),
view_Ref_O_device,
view_Ref_O_device,
ElementAccumulator(0)
);
// Copy to host memory
cutlass::TensorView<ElementP, LayoutP> view_Ref(matrix_Ref.data(), layout_P, extent_P);
std::vector<ElementO> matrix_O(layout_O.capacity(extent_O));
cutlass::device_memory::copy_to_host(matrix_O.data(), block_O.get() + offset_O.at(i), matrix_O.size());
std::vector<ElementO> matrix_Ref_O(layout_O.capacity(extent_O));
cutlass::device_memory::copy_to_host(matrix_Ref_O.data(), block_Ref_O.get(), matrix_Ref_O.size());
      bool verified_O = verify_tensor_<ElementO>(matrix_O, matrix_Ref_O);
passed = passed && verified_O;
if (!passed) {
std::cerr << "\n***\nError - problem " << i << " failed the QA check\n***\n" << std::endl;
if (!verified_O) {
std::cout << "Final matrix output is incorrect" << std::endl;
}
return passed;
}
}
return passed;
}
public:
/// Executes a CUTLASS Attention kernel and measures runtime.
Result profile() {
Result result;
result.passed = false;
int threadblock_count = Attention::sufficient(options.problem_sizes1.data(), options.problem_count);
// Early exit
if (!threadblock_count) {
std::cout << "Active CUDA device lacks hardware resources to run CUTLASS Grouped FMHA kernel." << std::endl;
return result;
}
result.passed = false;
// Initialize the problem
initialize_();
typename Attention::Arguments args(
problem_sizes_device0.get(),
problem_sizes_device1.get(),
options.problem_count,
threadblock_count,
ptr_Q.get(),
ptr_K.get(),
ptr_P.get(),
ptr_V.get(),
ptr_O.get(),
ptr_O_accumulate.get(),
ldq.get(),
ldk.get(),
ldp.get(),
ldv.get(),
ldo.get(),
options.causal,
options.alpha0,
options.problem_sizes1.data()
);
Attention fmha;
size_t workspace_size = fmha.get_workspace_size(args);
cutlass::DeviceAllocation<uint8_t> workspace(workspace_size);
result.status = fmha.initialize(args, workspace.get());
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to initialize CUTLASS Grouped FMHA kernel." << std::endl;
return result;
}
// Run the grouped FMHA object
result.status = fmha.run();
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS Grouped FMHA kernel." << std::endl;
return result;
}
// Wait for completion
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Verify correctness
//
result.passed = true;
if (options.reference_check) {
result.passed = verify_();
}
//
// Warm-up run of the grouped FMHA object
//
result.status = fmha.run();
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS Grouped FMHA kernel." << std::endl;
return result;
}
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        return result;
}
}
// Record an event at the start of a series of FMHA operations
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < this->options.iterations; ++iter) {
fmha();
}
//
// Stop profiling loop
//
// Record an event when the GEMM operations have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(this->options.iterations);
result.gflops = this->options.gflops(result.runtime_ms / 1000.0);
//
// Cleanup
//
for (auto event : events) {
(void)cudaEventDestroy(event);
}
std::cout << std::endl;
std::cout << "CUTLASS Attention:\n"
<< "====================================================" << std::endl;
std::cout << " " << " {seq length Q, seq length KV, head size, head size V, head number, batch size} = {" << options.seq_length \
<< ", " << options.seq_length_kv << ", " << options.head_size << ", " << options.head_size_v << ", " << options.head_number\
<< ", " << options.batch_size << "}." << std::endl;
options.print_problems();
std::cout << std::endl;
std::cout << " " << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " " << "GFLOPs: " << result.gflops << std::endl;
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
int kQueriesPerBlock,
int kKeysPerBlock,
int kMaxK,
cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode_
>
int run_grouped(Options& options) {
using AttentionKernel = typename cutlass::gemm::kernel::DefaultFMHAGrouped<
cutlass::half_t, // scalar_t
cutlass::arch::Sm80, // ArchTag
true, // Memory is aligned
kQueriesPerBlock,
kKeysPerBlock,
kMaxK,
GroupScheduleMode_
>::FMHAKernel;
using FMHA = cutlass::gemm::device::GemmGrouped<AttentionKernel>;
//
// Test and profile
//
TestbedAttention<FMHA> testbed(options);
Result result = testbed.profile();
if (!result.passed) {
std::cout << "Profiling CUTLASS attention has failed.\n";
std::cout << "\nFailed\n";
return -1;
}
std::cout << "\nPassed\n";
return 0;
}
template <
int kQueriesPerBlock,
int kKeysPerBlock,
int kMaxK
>
int run_attention(Options& options) {
if (options.scheduler_mode == cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly) {
return run_grouped<kQueriesPerBlock,
kKeysPerBlock,
kMaxK,
cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly>(options);
} else {
return run_grouped<kQueriesPerBlock,
kKeysPerBlock,
kMaxK,
cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute>(options);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
//
// This example uses mma.sync to directly access Tensor Cores to achieve peak performance.
//
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) {
//
// This example requires an NVIDIA Ampere-architecture GPU.
//
std::cout
<< "CUTLASS's CUTLASS Attention example requires a GPU of NVIDIA's Ampere Architecture or "
<< "later (compute capability 80 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.error) {
std::cerr << "Aborting execution." << std::endl;
return -1;
}
if (options.use_mask) {
std::cerr << "--use_mask is not supported at the moment\n";
return -2;
}
if (options.alignment != 1) {
std::cerr << "--alignment=1 is the only supported value\n";
return -2;
}
  // Determine kernel configuration based on the value head size (head_size_v).
  // If it is less than or equal to 64, each block operates over 64 queries and
  // 64 keys, and partial results can be stored in the register file.
  // If it is greater than 64, each block operates over 32 queries and 128 keys,
  // and partial results are stored in shared memory.
if (options.head_size_v > 64) {
static int const kQueriesPerBlock = 32;
static int const kKeysPerBlock = 128;
if (options.head_size_v <= kKeysPerBlock) {
return run_attention<kQueriesPerBlock, kKeysPerBlock, 128>(options);
} else {
return run_attention<kQueriesPerBlock, kKeysPerBlock, 65536>(options);
}
} else {
static constexpr int kMaxK = 64; // <- Decrease to 32/16 if your problem is smaller
static int const kQueriesPerBlock = 64;
static int const kKeysPerBlock = 64;
return run_attention<kQueriesPerBlock, kKeysPerBlock, kMaxK>(options);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/fused_multihead_attention_variable_seqlen.cu/0 | {
"file_path": "examples/41_fused_multi_head_attention/fused_multihead_attention_variable_seqlen.cu",
"repo_id": "examples",
"token_count": 16103
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cmath>
#include <type_traits>
#include <vector>
#include <cuda_fp16.h>
#include <curand_kernel.h>
#ifdef HAS_PYTORCH
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAGraphsUtils.cuh>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/scale_type.h"
#include "cutlass/fast_math.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/vector.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "debug_utils.h"
#include "gemm_kernel_utils.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/threadblock/epilogue_smem_accumulator.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/integer_subbyte.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/platform/platform.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/vector_iterator.h"
#include "epilogue/epilogue_pipelined.h"
#include "iterators/epilogue_predicated_tile_iterator.h"
#include "gemm/custom_mma.h"
#include "gemm/find_default_mma.h"
#include "gemm/mma_accum_lambda_iterator.h"
#include "gemm/mma_from_smem.h"
#include "transform/tile_smem_loader.h"
#include <inttypes.h>
using namespace gemm_kernel_utils;
namespace {
template <typename FragmentType, int32_t kNumThreads>
struct GmemTile {
/*
  Helper functions to efficiently store/load RF to/from gmem
GEMM accumulators have a particular format on A100, and
it takes some compute/shared-memory to rearrange them to
a RowMajor or ColumnMajor format in global memory through
an Epilogue. The same complexity goes for loading into RF.
This class loads/stores RF as they are, and can be used for
efficient accumulation across gemms for instance:
```
GmemTile tile;
for (int i = 0; i < N; ++i) {
// ...
Fragment accum;
if (i == 0) {
accum.clear();
} else {
tile.load(accum);
}
mma(accum, ...);
if (i < N-1) {
// Store for next GEMM
tile.store(accum);
} else {
// Store in tensor (eg RowMajor)
epilogue(accum);
}
// ...
}
```
*/
// 128bits per thread
using AccessType = cutlass::Array<float, 4>;
static constexpr int32_t kBytes = sizeof(AccessType);
static constexpr int32_t kStride = kNumThreads * AccessType::kElements;
static constexpr int32_t kNumIters =
FragmentType::kElements / AccessType::kElements;
static constexpr int32_t kElementsStored =
kNumThreads * FragmentType::kElements;
static_assert(
FragmentType::kElements % AccessType::kElements == 0,
"fragment not aligned on 128 bits");
float* ptr;
CUTLASS_DEVICE void load(FragmentType& fragment, int thread_id) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kNumIters; ++i) {
AccessType* __restrict__ gmem_ptr = reinterpret_cast<AccessType*>(
ptr + thread_id * AccessType::kElements + i * kStride);
AccessType sub_fragment;
cutlass::arch::global_load<AccessType, kBytes>(
sub_fragment, gmem_ptr, true);
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < AccessType::kElements; ++j) {
fragment[i * AccessType::kElements + j] = sub_fragment[j];
}
}
}
CUTLASS_DEVICE void store(FragmentType const& fragment, int thread_id) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kNumIters; ++i) {
AccessType* __restrict__ gmem_ptr = reinterpret_cast<AccessType*>(
ptr + thread_id * AccessType::kElements + i * kStride);
AccessType sub_fragment;
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < AccessType::kElements; ++j) {
sub_fragment[j] = fragment[i * AccessType::kElements + j];
}
cutlass::arch::global_store<AccessType, kBytes>(
sub_fragment, gmem_ptr, true);
}
}
CUTLASS_DEVICE void storeAtomicAdd(
FragmentType const& fragment,
int thread_id) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kNumIters; ++i) {
float* gmem_ptr = ptr + thread_id * AccessType::kElements + i * kStride;
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < AccessType::kElements; ++j) {
float val = fragment[i * AccessType::kElements + j];
float* ptr = gmem_ptr + j;
atomicAdd(ptr, val);
}
}
}
};
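// Simple spin lock on a 32-bit word in global memory: acquire() has thread 0
// spin with atomicCAS until the word holds `set_val` and then syncs the block;
// release() has thread 0 write 0 back (with release semantics on SM70+).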
struct AtomicLock {
CUTLASS_DEVICE static void acquire(
int32_t* lock,
int set_val,
int thread_id) {
if (thread_id == 0) {
while (atomicCAS(lock, 0 /*cmp*/, set_val /*setval*/) != set_val) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__nanosleep(40);
#endif
}
}
__syncthreads();
}
CUTLASS_DEVICE static void release(int32_t* lock, int thread_id) {
if (thread_id == 0) {
int status = 0;
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
asm volatile("st.global.release.gpu.b32 [%0], %1;\n"
:
: "l"(lock), "r"(status));
#else
asm volatile("st.global.cg.b32 [%0], %1;\n" : : "l"(lock), "r"(status));
#endif
}
}
};
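// Target number of resident warps per SM for the backward kernel; used below to
// derive the minimum number of blocks per SM for the launch bounds
// (kMinBlocksPerSm).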
template <typename scalar_t, typename Arch>
constexpr int getWarpsPerSmBw() {
bool is_half = !cutlass::platform::is_same<scalar_t, float>::value;
if (Arch::kMinComputeCapability >= 80) {
return is_half ? 12 : 8;
}
return 8;
}
} // namespace
template <
// which arch we target (eg `cutlass::arch::Sm80`)
typename ArchTag_,
// input/output type
typename scalar_t_,
// run optimized kernel because memory accesses will be aligned
bool kIsAligned_,
// use dropout if enabled
bool kApplyDropout_,
// when doing a GEMM, preload the next one (uses more shmem)
bool kPreload_,
// block dimensions
int kBlockSizeI_,
int kBlockSizeJ_,
// upperbound on `max(value.shape[-1], query.shape[-1])`
int kMaxK_ = (int)cutlass::platform::numeric_limits<uint32_t>::max(),
// assumes that `cu_seqlen` is None, and
// (1) `num_queries % kBlockSizeI == 0`
// (2) `num_keys % kBlockSizeJ == 0`
bool kKeysQueriesAlignedToBlockSize_ = false,
// Allows to parallelize across keys
bool kEnableSplitKeys_ = true>
struct AttentionBackwardKernel {
enum CustomMaskType {
NoCustomMask = 0,
CausalFromTopLeft = 1,
CausalFromBottomRight = 2,
NumCustomMaskTypes,
};
using scalar_t = scalar_t_;
using output_t = scalar_t;
using output_accum_t = float;
using lse_scalar_t = float;
using accum_t = float;
using ArchTag = ArchTag_;
static constexpr bool kIsAligned = kIsAligned_;
static constexpr bool kApplyDropout = kApplyDropout_;
static constexpr bool kPreload = kPreload_;
static constexpr int kBlockSizeI = kBlockSizeI_;
static constexpr int kBlockSizeJ = kBlockSizeJ_;
static constexpr int kMaxK = kMaxK_;
static constexpr bool kKeysQueriesAlignedToBlockSize =
kKeysQueriesAlignedToBlockSize_;
static constexpr int64_t kWarpSize = 32;
// If this is true, we store and accumulate dK/dV in RF
  // rather than going back to gmem every time
static constexpr bool kIsHalf = cutlass::sizeof_bits<scalar_t>::value <= 16;
static constexpr bool kOutputInRF = kIsHalf && kMaxK <= kBlockSizeI;
static_assert(
!kPreload ||
(kIsHalf && ArchTag::kMinComputeCapability >= 80 && kOutputInRF),
"preload MMA not supported");
static constexpr bool kPrologueQK = kPreload;
static constexpr bool kPrologueGV = kPreload;
static constexpr bool kPrologueDOV = kPreload;
static constexpr bool kPrologueGQ = kPreload;
static constexpr bool kPrologueGK = kPreload;
static constexpr int64_t kNumWarpsPerBlock =
(kBlockSizeI * kBlockSizeJ) / (32 * 32);
// Compute delta for the f16 kernels
// TODO: Figure out why it's slower on the f32 kernels
// (something due to RF pressure?)
// TODO: Remove condition on `kOutputInRF` - this is needed to work
// around a compiler bug on V100, not exactly sure why but I spent
// too much time on this already. Reproducible with
// (B, Mq, Mkv, K) = (1, 1, 1, 136) for instance
static constexpr bool kKernelComputesDelta =
kIsHalf && (kOutputInRF || ArchTag::kMinComputeCapability != 70);
// Launch bounds
static constexpr int64_t kNumThreads = kWarpSize * kNumWarpsPerBlock;
static constexpr int64_t kMinBlocksPerSm =
getWarpsPerSmBw<scalar_t, ArchTag>() / kNumWarpsPerBlock;
using GemmType = DefaultGemmType<ArchTag, scalar_t>;
using DefaultConfig =
typename cutlass::gemm::device::DefaultGemmConfiguration<
typename GemmType::OpClass,
ArchTag,
scalar_t,
scalar_t,
scalar_t, // ElementC
accum_t // ElementAccumulator
>;
static constexpr auto kOptimalAlignement = cutlass::platform::max(
DefaultConfig::kAlignmentA,
DefaultConfig::kAlignmentB);
static constexpr auto kMinimumAlignment = GemmType::kMinimumAlignment;
struct MatmulQK {
/*
attn_T = k_j @ q_i.transpose(-2, -1) # matmul
attn_T = (attn_T - logsumexp[i_start:i_end].unsqueeze(1).transpose(-2,
-1)).exp() # epilogue
with attn_T.shape = (kBlockSizeJ, kBlockSizeI)
*/
using ThreadblockShape =
cutlass::gemm::GemmShape<kBlockSizeJ, kBlockSizeI, GemmType::ThreadK>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
using DefaultMma = typename cutlass::gemm::threadblock::DefaultMma<
scalar_t, // ElementA
cutlass::layout::RowMajor, // LayoutA
kIsAligned ? DefaultConfig::kAlignmentA : GemmType::kMinimumAlignment,
scalar_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment,
accum_t, // ElementC
cutlass::layout::RowMajor, // LayoutC
typename GemmType::OpClass,
ArchTag,
ThreadblockShape,
WarpShape,
typename GemmType::InstructionShape,
DefaultConfig::kStages,
typename GemmType::Operator,
false, // AccumulatorsInRowMajor = false,
cutlass::gemm::SharedMemoryClearOption::kNone>;
using MmaCore = typename DefaultMma::MmaCore;
using Mma =
typename MakeCustomMma<typename DefaultMma::ThreadblockMma, kMaxK>::Mma;
// used for efficient load of bias tile (Bij) from global memory to shared
// memory
using BiasLoader = TileSmemLoader<
scalar_t,
// Bij is applied to transposed attn matrix tile (Pij.T). Bij is loaded
// row-major but needs to have transposed shape so we get the same
// elements.
cutlass::MatrixShape<ThreadblockShape::kN, ThreadblockShape::kM>,
MmaCore::kThreads,
// input restriction: kv_len has to be a multiple of this value
128 / cutlass::sizeof_bits<scalar_t>::value>;
// Epilogue to store to shared-memory in a format that we can use later for
// the second matmul
using B2bGemm = typename cutlass::gemm::threadblock::B2bGemm<
typename Mma::Operator::IteratorC,
typename Mma::Operator,
scalar_t,
WarpShape,
ThreadblockShape>;
using AccumLambdaIterator = typename DefaultMmaAccumLambdaIterator<
typename Mma::Operator::IteratorC,
accum_t,
kWarpSize>::Iterator;
using AccumulatorSharedStorage = typename B2bGemm::AccumulatorSharedStorage;
};
struct MatmulGradV {
/*
grad_v[j_start:j_end] += attn_T @ do_i # matmul
Dimensions: (kBlockSizeJ * kNumWarpsPerBlock, kBlockSizeI, K)
(we might need to iterate multiple times on K)
*/
using ThreadblockShape =
cutlass::gemm::GemmShape<kBlockSizeJ, kBlockSizeI, GemmType::ThreadK>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
using InstructionShape = typename GemmType::InstructionShape;
using DefaultGemm = cutlass::gemm::kernel::DefaultGemm<
scalar_t, // ElementA,
cutlass::layout::RowMajor, // LayoutA,
DefaultConfig::kAlignmentA,
scalar_t, // ElementB,
cutlass::layout::RowMajor, // LayoutB,
kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment,
output_t,
cutlass::layout::RowMajor, // LayoutC,
accum_t,
typename GemmType::OpClass,
ArchTag,
ThreadblockShape,
WarpShape,
typename GemmType::InstructionShape,
typename DefaultConfig::EpilogueOutputOp,
void, // ThreadblockSwizzle - not used
DefaultConfig::kStages,
false, // SplitKSerial
typename GemmType::Operator>;
// if dropout:
// for computing dVj += (Pij.T * Zij) @ dOi
// Pij_dropped.T = Pij.T * Zij is computed on the fly as fragments of
// Pij.T are loaded in. The reason we do it this way is because Pij.T and
// Zij are reused in later steps, while Pij_dropped.T is only needed in
// this step. computing Pij_dropped.T on the fly allows us to avoid
// keeping all 3 of Pij_dropped.T, Pij.T, and Zij in shared memory at the
// same time.
// if no dropout:
// for computing dVj += Pij.T @ dOi
using WarpIteratorA = typename cutlass::gemm::threadblock::
DefaultWarpIteratorAFromSharedMemory<
typename DefaultGemm::Mma::Operator::Shape, // WarpShape
typename DefaultGemm::Mma::Operator::
InstructionShape, // InstructionShape
typename DefaultGemm::Mma::Operator::
IteratorA, // RegularWarpIterator
typename DefaultGemm::Mma::Policy // Policy
>::WarpIterator;
using DefaultMmaFromSmem =
typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
typename DefaultGemm::Mma,
MatmulQK::AccumulatorSharedStorage::Shape::kN,
WarpIteratorA,
kApplyDropout>; // kScaleOperandA
using Mma = typename DefaultMmaFromSmem::Mma;
using IteratorB = typename Mma::IteratorB;
using WarpCount = typename Mma::WarpCount;
// Epilogue
using DefaultOutputOp = typename DefaultConfig::EpilogueOutputOp;
using DefaultEpilogue = typename DefaultGemm::Epilogue;
using OutputTileIterator =
typename cutlass::epilogue::threadblock::MakePrefetchableIterator<
typename DefaultEpilogue::OutputTileIterator>::Iterator;
using AccumTileGmem = GmemTile<typename Mma::FragmentC, (int)kNumThreads>;
};
struct MatmulDOIVJ {
/*
doi_t_vj = do_i @ v_j.transpose(-2, -1) # matmul
tmp = (doi_t_vj - Di.unsqueeze(1)) * attn # inplace / epilogue?
*/
using ThreadblockShape =
cutlass::gemm::GemmShape<kBlockSizeI, kBlockSizeJ, GemmType::ThreadK>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
using ElementC = output_t;
using ElementAccum = accum_t;
// no-op output op - epilogue just stores result to global memory
using BiasGradEpilogueOutputOp =
typename cutlass::epilogue::thread::LinearCombination<
ElementC,
DefaultConfig::EpilogueOutputOp::kCount,
typename DefaultConfig::EpilogueOutputOp::ElementAccumulator,
typename DefaultConfig::EpilogueOutputOp::ElementCompute,
cutlass::epilogue::thread::ScaleType::Nothing>;
using DefaultGemm = typename cutlass::gemm::kernel::DefaultGemm<
scalar_t, // ElementA
cutlass::layout::RowMajor, // LayoutA
kIsAligned ? DefaultConfig::kAlignmentA : GemmType::kMinimumAlignment,
scalar_t, // ElementB
cutlass::layout::ColumnMajor, // LayoutB
kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment,
ElementC, // ElementC
cutlass::layout::RowMajor, // LayoutC
ElementAccum, // ElementAccumulator
typename GemmType::OpClass,
ArchTag,
ThreadblockShape,
WarpShape,
typename GemmType::InstructionShape,
BiasGradEpilogueOutputOp, // EpilogueOutputOp
void, // ThreadblockSwizzle (not used)
// multiple preloads, dropout Zij tile, and 3 stages push us over shared
// memory capacity on A100. set a ceiling on number of stages to save
// shared memory if dropout is in use.
kPreload && kApplyDropout && (kBlockSizeI * kBlockSizeJ > 64 * 64)
? cutlass::const_min(2, DefaultConfig::kStages)
: DefaultConfig::kStages, // Stages
false, // SplitKSerial
typename GemmType::Operator,
cutlass::gemm::SharedMemoryClearOption::kNone>;
using Mma = typename MakeCustomMma<typename DefaultGemm::Mma, kMaxK>::Mma;
using AccumLambdaIterator = typename DefaultMmaAccumLambdaIterator<
typename Mma::Operator::IteratorC,
ElementAccum,
kWarpSize>::Iterator;
// epilogue used to write bias gradient, which is just the output of this
// matmul with some operations applied to the fragment
using BiasGradEpilogue = typename DefaultGemm::Epilogue;
// Epilogue to store to shared-memory in a format that we can use later for
// the second matmul
using B2bGemm = typename cutlass::gemm::threadblock::B2bGemm<
typename DefaultGemm::Mma::Operator::IteratorC,
typename DefaultGemm::Mma::Operator,
scalar_t,
WarpShape,
ThreadblockShape>;
using AccumulatorSharedStorage = typename B2bGemm::AccumulatorSharedStorage;
};
struct MatmulGradQ {
// grad_q <- tmp @ k_j
using ThreadblockShape =
cutlass::gemm::GemmShape<kBlockSizeI, kBlockSizeJ, GemmType::ThreadK>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
using InstructionShape = typename GemmType::InstructionShape;
using DefaultGemm = cutlass::gemm::kernel::DefaultGemm<
scalar_t, // ElementA,
cutlass::layout::RowMajor, // LayoutA,
DefaultConfig::kAlignmentA,
scalar_t, // ElementB,
cutlass::layout::RowMajor, // LayoutB,
kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment,
output_t,
cutlass::layout::RowMajor, // LayoutC,
accum_t,
typename GemmType::OpClass,
ArchTag,
ThreadblockShape,
WarpShape,
typename GemmType::InstructionShape,
typename DefaultConfig::EpilogueOutputOp,
void, // ThreadblockSwizzle - not used
DefaultConfig::kStages,
false, // SplitKSerial
typename GemmType::Operator>;
using WarpIteratorA = typename cutlass::gemm::threadblock::
DefaultWarpIteratorAFromSharedMemory<
typename DefaultGemm::Mma::Operator::Shape,
typename DefaultGemm::Mma::Operator::InstructionShape,
typename DefaultGemm::Mma::Operator::IteratorA,
typename DefaultGemm::Mma::Policy>::WarpIterator;
using DefaultMmaFromSmem =
typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
typename DefaultGemm::Mma,
MatmulDOIVJ::AccumulatorSharedStorage::Shape::kN,
WarpIteratorA,
false>; // kScaleOperandA
using Mma = typename DefaultMmaFromSmem::Mma;
using IteratorB = typename Mma::IteratorB;
using WarpCount = typename Mma::WarpCount;
// Epilogue
using DefaultOutputOp = typename DefaultConfig::EpilogueOutputOp;
using DefaultEpilogue = typename DefaultGemm::Epilogue;
using OutputTileIterator =
typename cutlass::epilogue::threadblock::MakePrefetchableIterator<
typename DefaultEpilogue::OutputTileIterator>::Iterator;
using AccumTileGmem = GmemTile<typename Mma::FragmentC, (int)kNumThreads>;
};
struct MatmulGradK {
// grad_k <- tmp.transpose(-2, -1) @ q_i
using ThreadblockShape =
cutlass::gemm::GemmShape<kBlockSizeJ, kBlockSizeI, GemmType::ThreadK>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
using InstructionShape = typename GemmType::InstructionShape;
using DefaultGemm = cutlass::gemm::kernel::DefaultGemm<
scalar_t, // ElementA,
cutlass::layout::RowMajor, // LayoutA,
DefaultConfig::kAlignmentA,
scalar_t, // ElementB,
cutlass::layout::RowMajor, // LayoutB,
kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment,
output_t,
cutlass::layout::RowMajor, // LayoutC,
accum_t,
typename GemmType::OpClass,
ArchTag,
ThreadblockShape,
WarpShape,
typename GemmType::InstructionShape,
typename DefaultConfig::EpilogueOutputOp,
void, // ThreadblockSwizzle - not used
DefaultConfig::kStages,
false, // SplitKSerial
typename GemmType::Operator>;
using WarpIteratorA = typename cutlass::gemm::threadblock::
DefaultWarpIteratorAFromSharedMemory<
typename DefaultGemm::Mma::Operator::Shape,
typename DefaultGemm::Mma::Operator::InstructionShape,
typename DefaultGemm::Mma::Operator::IteratorA,
typename DefaultGemm::Mma::Policy>::WarpIterator;
using DefaultMmaFromSmemN =
typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
typename DefaultGemm::Mma,
MatmulQK::AccumulatorSharedStorage::Shape::kN, // kMaxK
WarpIteratorA,
false>; // kScaleOperandA
using DefaultMmaFromSmemT =
typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
typename DefaultGemm::Mma,
MatmulDOIVJ::AccumulatorSharedStorage::Shape::kM, // kMaxK
WarpIteratorA,
false, // kScaleOperandA
kPreload>; // kTransposeA
using DefaultMmaFromSmem = typename cutlass::platform::conditional<
DefaultMmaFromSmemT::kIsTransposedA,
DefaultMmaFromSmemT,
DefaultMmaFromSmemN>::type;
using Mma = typename DefaultMmaFromSmem::Mma;
using IteratorB = typename Mma::IteratorB;
using WarpCount = typename Mma::WarpCount;
// Epilogue
using DefaultOutputOp = typename DefaultConfig::EpilogueOutputOp;
using DefaultEpilogue = typename DefaultGemm::Epilogue;
using OutputTileIterator =
typename cutlass::epilogue::threadblock::MakePrefetchableIterator<
typename DefaultEpilogue::OutputTileIterator>::Iterator;
using AccumTileGmem = GmemTile<typename Mma::FragmentC, (int)kNumThreads>;
};
static constexpr bool kEnableSplitKeys = kEnableSplitKeys_;
static constexpr bool kNeedsAccumGradQ = kEnableSplitKeys ||
!cutlass::platform::is_same<output_accum_t, output_t>::value;
static constexpr bool kNeedsAccumGradK = !kOutputInRF &&
!cutlass::platform::is_same<output_accum_t, output_t>::value;
static constexpr bool kNeedsAccumGradV = !kOutputInRF &&
!cutlass::platform::is_same<output_accum_t, output_t>::value;
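  // Scratch slot used when several kernel blocks accumulate into the same
  // grad_q tile (split-keys and/or causal iteration): `lock` and `counter`
  // implement the spin-lock + arrival count used in the GradQ epilogue,
  // and `buffer` holds the partial accumulator in output_accum_t precision.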
struct GradQTempStorage {
int32_t lock;
int32_t counter;
int32_t pad[2]; // pad to 128bits
output_accum_t buffer[MatmulGradQ::AccumTileGmem::kElementsStored];
};
struct Params {
// Input tensors
scalar_t* query_ptr = nullptr; // [Mq, nH, K]
scalar_t* key_ptr = nullptr; // [Mk, nH, K]
scalar_t* value_ptr = nullptr; // [Mk, nH, Kv]
scalar_t* bias_ptr = nullptr;
lse_scalar_t* logsumexp_ptr = nullptr; // [nH, Mq]
scalar_t* output_ptr = nullptr; // [Mq, nH, Kv]
scalar_t* grad_output_ptr = nullptr; // [Mq, nH, Kv]
accum_t* delta_ptr = nullptr; // [nH, Mq]
int32_t* cu_seqlens_q_ptr = nullptr;
int32_t* cu_seqlens_k_ptr = nullptr;
// Output tensors
output_t* grad_query_ptr = nullptr; // [Mq, nH, K]
output_t* grad_key_ptr = nullptr; // [Mk, nH, K]
output_t* grad_value_ptr = nullptr; // [Mk, nH, Kv]
output_t* grad_bias_ptr = nullptr;
// Accumulators
    output_accum_t* workspace = nullptr; // [Mkv, Kq] + [Mkv, Kv] + [Mq, Kq]
output_accum_t* workspace_gv =
nullptr; // (will be calculated by the kernel)
GradQTempStorage* workspace_gq =
nullptr; // (will be calculated by the kernel)
// Scale
accum_t scale = 1.0f;
// Dimensions/strides
int32_t head_dim = -1;
int32_t head_dim_value = -1;
int32_t num_queries = -1;
int32_t num_keys = -1;
int32_t num_heads = -1;
uint8_t custom_mask_type = NoCustomMask;
int32_t q_strideM = -1;
int32_t k_strideM = -1;
int32_t v_strideM = -1;
int32_t bias_strideM = 0;
int32_t gO_strideM = -1;
int32_t gB_strideM = -1;
int8_t gQKV_strideM_multiplier = 1; // 3 for packed, 1 otherwise
#ifdef HAS_PYTORCH
// dropout
at::PhiloxCudaState rng_engine_inputs = {0, 0};
#endif
// RNG sequence offset based on batch_id and head_id
unsigned long long dropout_batch_head_rng_offset = 0;
float dropout_prob = 0.0f;
CUTLASS_HOST_DEVICE int32_t o_strideM() const {
return head_dim_value * num_heads;
}
CUTLASS_HOST_DEVICE int32_t gQ_strideM() const {
return gQKV_strideM_multiplier * num_heads * head_dim;
}
CUTLASS_HOST_DEVICE int32_t gK_strideM() const {
return gQKV_strideM_multiplier * num_heads * head_dim;
}
CUTLASS_HOST_DEVICE int32_t gV_strideM() const {
return gQKV_strideM_multiplier * num_heads * head_dim_value;
}
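    // Example with hypothetical sizes: for num_heads = 8 and head_dim = 64,
    // gQ_strideM() is 8 * 64 = 512 elements when Q/K/V gradients are separate
    // tensors, and 3 * 8 * 64 = 1536 when they are written into a packed QKV
    // tensor (gQKV_strideM_multiplier == 3).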
// Everything below is only used in `advance_to_block`
// and shouldn't use registers
int64_t o_strideH = -1;
int32_t q_strideH = -1;
int32_t k_strideH = -1;
int32_t v_strideH = -1;
int64_t bias_strideH = 0;
int64_t o_strideB = -1;
int64_t q_strideB = -1;
int64_t k_strideB = -1;
int64_t v_strideB = -1;
int64_t bias_strideB = 0;
int64_t lse_strideB = -1;
int64_t lse_strideH = -1;
int64_t delta_strideB = -1;
int64_t delta_strideH = -1;
int32_t num_batches = -1;
int16_t num_splits_key = 1; // We use `gridDim.x` inside kernel
int64_t gO_strideB = 0;
int64_t gQ_strideB = 0;
int64_t gK_strideB = 0;
int64_t gV_strideB = 0;
int64_t gB_strideB = 0;
int64_t gO_strideH = 0;
int64_t gQ_strideH = 0;
int64_t gK_strideH = 0;
int64_t gV_strideH = 0;
int64_t gB_strideH = 0;
CUTLASS_DEVICE int16_t num_splits_key_device() const {
return kEnableSplitKeys ? gridDim.x : 1;
}
CUTLASS_DEVICE int16_t split_key_device() const {
return kEnableSplitKeys ? blockIdx.x : 0;
}
CUTLASS_DEVICE bool advance_to_block() {
int64_t batch_id = blockIdx.z;
int32_t head_id = blockIdx.y;
if (kNeedsAccumGradQ || kNeedsAccumGradK || kNeedsAccumGradV) {
assert(workspace_size() == 0 || workspace != nullptr);
workspace += (batch_id * num_heads + head_id) * workspace_strideBH();
workspace = warp_uniform(workspace);
workspace_gv = workspace + workspace_elements_gk();
workspace_gq =
(GradQTempStorage*)(workspace_gv + workspace_elements_gv());
if (kEnableSplitKeys) {
workspace_gv += workspace_elements_gv() * split_key_device() /
num_splits_key_device();
workspace += workspace_elements_gk() * split_key_device() /
num_splits_key_device();
}
} else {
workspace = nullptr;
}
// Advance pointers that depend on the total concatenated
// number of queries, as `num_queries` is modified in the block
// below
dropout_batch_head_rng_offset =
batch_id * (num_heads * num_queries * num_keys) +
head_id * (num_queries * num_keys);
logsumexp_ptr += batch_id * lse_strideB + head_id * lse_strideH;
if (cu_seqlens_q_ptr != nullptr) {
assert(cu_seqlens_k_ptr != nullptr);
cu_seqlens_q_ptr += batch_id;
cu_seqlens_k_ptr += batch_id;
int32_t q_start = cu_seqlens_q_ptr[0];
int32_t k_start = cu_seqlens_k_ptr[0];
int64_t q_next_start = cu_seqlens_q_ptr[1];
int64_t k_next_start = cu_seqlens_k_ptr[1];
assert(q_next_start - q_start <= num_queries);
assert(k_next_start - k_start <= num_keys);
num_queries = q_next_start - q_start;
num_keys = k_next_start - k_start;
// Jump manually
batch_id = 0;
query_ptr += q_start * q_strideM;
key_ptr += k_start * k_strideM;
value_ptr += k_start * v_strideM;
assert(bias_ptr == nullptr);
assert(grad_bias_ptr == nullptr);
output_ptr += q_start * o_strideM();
grad_output_ptr += q_start * gO_strideM;
delta_ptr += q_start;
grad_query_ptr += q_start * gQ_strideM();
grad_key_ptr += k_start * gK_strideM();
grad_value_ptr += k_start * gV_strideM();
}
query_ptr += batch_id * q_strideB + head_id * q_strideH;
key_ptr += batch_id * k_strideB + head_id * k_strideH;
value_ptr += batch_id * v_strideB + head_id * v_strideH;
if (bias_ptr != nullptr) {
bias_ptr += batch_id * bias_strideB + head_id * bias_strideH;
}
output_ptr += batch_id * o_strideB + head_id * o_strideH;
grad_output_ptr += batch_id * gO_strideB + head_id * gO_strideH;
delta_ptr += batch_id * delta_strideB + head_id * delta_strideH;
grad_query_ptr += batch_id * gQ_strideB + head_id * gQ_strideH;
grad_key_ptr += batch_id * gK_strideB + head_id * gK_strideH;
grad_value_ptr += batch_id * gV_strideB + head_id * gV_strideH;
if (grad_bias_ptr != nullptr) {
grad_bias_ptr += batch_id * gB_strideB + head_id * gB_strideH;
}
// Some values are modified above
// Signal to the compiler that they are the same in all threads
// and can be stored in warp-uniform registers (Sm75+)
num_queries = warp_uniform(num_queries);
num_keys = warp_uniform(num_keys);
custom_mask_type = warp_uniform(custom_mask_type);
query_ptr = warp_uniform(query_ptr);
key_ptr = warp_uniform(key_ptr);
value_ptr = warp_uniform(value_ptr);
bias_ptr = warp_uniform(bias_ptr);
logsumexp_ptr = warp_uniform(logsumexp_ptr);
output_ptr = warp_uniform(output_ptr);
grad_output_ptr = warp_uniform(grad_output_ptr);
delta_ptr = warp_uniform(delta_ptr);
grad_query_ptr = warp_uniform(grad_query_ptr);
grad_key_ptr = warp_uniform(grad_key_ptr);
grad_value_ptr = warp_uniform(grad_value_ptr);
grad_bias_ptr = warp_uniform(grad_bias_ptr);
#if 0
PRINT_T0("[b:%d h:%d] dp[0]:%f Q:%f K:%f V:%f LSE:%f",
int(blockIdx.z), int(blockIdx.y),
float(delta_ptr[0]),
float(query_ptr[0]), float(key_ptr[0]), float(value_ptr[0]),
float(logsumexp_ptr[0])
)
#endif
return true;
}
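    // Grid mapping: blockIdx.x = key split, blockIdx.y = head,
    // blockIdx.z = batch; each block runs kNumWarpsPerBlock warps of
    // kWarpSize threads.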
__host__ dim3 getBlocksGrid() const {
return dim3(num_splits_key, num_heads, num_batches);
}
__host__ dim3 getThreadsGrid() const {
return dim3(kWarpSize * kNumWarpsPerBlock, 1, 1);
}
CUTLASS_HOST_DEVICE int64_t workspace_elements_gk() const {
if (!kNeedsAccumGradK) {
return 0;
}
return num_splits_key * align_up(num_keys, (int32_t)kBlockSizeJ) *
align_up(head_dim, (int32_t)kBlockSizeI);
}
CUTLASS_HOST_DEVICE int64_t workspace_elements_gv() const {
if (!kNeedsAccumGradV) {
return 0;
}
return num_splits_key * align_up(num_keys, (int32_t)kBlockSizeJ) *
align_up(head_dim_value, (int32_t)kBlockSizeI);
}
CUTLASS_HOST_DEVICE int64_t workspace_elements_gq() const {
if (!kNeedsAccumGradQ) {
return 0;
}
int num_blocks = ceil_div(num_queries, kBlockSizeI);
int num_cols = ceil_div(head_dim, MatmulGradQ::ThreadblockShape::kN);
return num_blocks * num_cols * sizeof(GradQTempStorage) /
sizeof(output_accum_t);
}
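    // Per-(batch, head) workspace layout, carved out in `advance_to_block`:
    //   [ grad_k accumulator | grad_v accumulator | GradQTempStorage slots ]
    // `workspace_strideBH()` below is the 128-bit-aligned size of one slice.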
CUTLASS_HOST_DEVICE int64_t workspace_strideBH() const {
// Aligned on 128bits
return align_up(
workspace_elements_gk() + workspace_elements_gv() +
workspace_elements_gq(),
int64_t(4));
}
CUTLASS_HOST_DEVICE int64_t workspace_size() const {
// Returns size of buffer we need to run this kernel
return num_batches * num_heads * workspace_strideBH() * sizeof(float);
}
CUTLASS_HOST_DEVICE bool should_zero_workspace() const {
return num_splits_key > 1;
}
};
// shared storage for keeping Zij matrix. not needed if we aren't using
// dropout, in which case we use an empty array to save shared memory
using ZijSharedStorage = typename cutlass::platform::conditional<
kApplyDropout,
typename MatmulQK::AccumulatorSharedStorage,
// dummy shared storage object that takes up no space.
typename cutlass::gemm::threadblock::AccumulatorSharedStorage<
#ifdef _WIN32
// windows builds throw the error:
// "type containing an unknown-size array is not allowed"
// if we try to make Zij shared storage zero-sized.
// To get around this just make it sized 1 on windows.
typename cutlass::gemm::GemmShape<1, 1, 0>,
#else
typename cutlass::gemm::GemmShape<0, 0, 0>,
#endif
typename MatmulQK::AccumulatorSharedStorage::Element,
typename MatmulQK::AccumulatorSharedStorage::Layout,
typename cutlass::MatrixShape<0, 0>>>::type;
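  // Shared-memory layouts. The per-phase `partN` structs live in a union, so
  // the shared-memory footprint is sizeof(persistent) plus the largest phase
  // rather than the sum of all phases; `print_size()` reports the breakdown.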
struct SharedStoragePrologue {
struct {
cutlass::Array<accum_t, kBlockSizeI> di; // (do_i * o_i).sum(-1)
typename MatmulQK::Mma::SharedStorageA mm_qk_k;
} persistent;
union {
struct {
// part1 - after Q.K / dV / dO.V
union {
// 1. efficient load of bias tile Bij, which is then applied to Pij
typename MatmulQK::BiasLoader::SmemTile bias;
// 4. store Pij. it is needed:
// - in dVj += (Pij.T * Zij) @ dOi
// - in dSij = Pij * (dPij - Di)
// 6. dVj += (Pij.T * Zij) @ dOi
// 10. write to fragment
typename MatmulQK::AccumulatorSharedStorage attn_shared_storage;
};
// 5. store Zij. it is needed in dVj += (Pij.T * Zij) @ dOi
ZijSharedStorage zij;
union {
// 2. prologue for dVj
// 6. workspace for dVj += (Pij.T * Zij) @ dOi
typename MatmulGradV::Mma::SharedStorage mm_gradV;
// 7. dVj epilogue
typename MatmulGradV::DefaultEpilogue::SharedStorage gradV_epilogue;
};
// 3. prologue for dPij_dropped
// 8. used in dPij_dropped = dOi @ Vj.T
typename MatmulDOIVJ::Mma::SharedStorage mm_doivj;
} part1;
struct {
// part2 - dQ
union {
typename MatmulQK::AccumulatorSharedStorage
tmpT_shared_storage; // (from part1)
typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage;
};
typename MatmulGradK::Mma::SharedStorage mm_gradK; // (preload)
typename MatmulGradQ::Mma::SharedStorage mm_gradQ; // (preload)
union {
// store dB = dSij to global memory
typename MatmulDOIVJ::BiasGradEpilogue::SharedStorage gradB_epilogue;
typename MatmulGradQ::DefaultEpilogue::SharedStorage gradQ_epilogue;
};
} part2;
struct {
// part3 - after last iteration on dQ's epilogue / dK
union {
typename MatmulQK::AccumulatorSharedStorage
tmpT_shared_storage; // (from part1)
typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage;
};
typename MatmulGradK::Mma::SharedStorage mm_gradK; // (preload)
typename MatmulGradQ::DefaultEpilogue::SharedStorage
gradQ_epilogue_lastIter;
typename MatmulGradK::DefaultEpilogue::SharedStorage gradK_epilogue;
} part3;
struct {
// part4 - after last iteration on dK's epilogue / preload next K.Q_t
typename MatmulQK::Mma::SharedStorageB mm_qk_q;
// If we reach end of current key, dump RF->gmem with "final" epilogues
typename MatmulGradK::DefaultEpilogue::SharedStorage
gradK_epilogue_final;
typename MatmulGradV::DefaultEpilogue::SharedStorage
gradV_epilogue_final;
} part4;
};
static void print_size() {
// Field size
#define FSZ(f) int((sizeof(((SharedStoragePrologue*)0)->f)))
printf("Total smem: %d bytes\n", int(sizeof(SharedStoragePrologue)));
printf(" persistent: %db\n", FSZ(persistent));
printf(" mm_qk_k: %db\n", FSZ(persistent.mm_qk_k));
printf(" part1: %db\n", FSZ(part1));
printf(" bias: %db\n", FSZ(part1.bias));
printf(" attn_shared_storage: %db\n", FSZ(part1.attn_shared_storage));
printf(" zij: %db\n", FSZ(part1.zij));
printf(" mm_gradV: %db\n", FSZ(part1.mm_gradV));
printf(" gradV_epilogue: %db\n", FSZ(part1.gradV_epilogue));
printf(" mm_doivj: %db\n", FSZ(part1.mm_doivj));
printf(" part2: %db\n", FSZ(part2));
printf(" tmpT_shared_storage: %db\n", FSZ(part2.tmpT_shared_storage));
printf(" tmp_shared_storage: %db\n", FSZ(part2.tmp_shared_storage));
printf(" mm_gradK: %db\n", FSZ(part2.mm_gradK));
printf(" mm_gradQ: %db\n", FSZ(part2.mm_gradQ));
printf(" gradB_epilogue: %db\n", FSZ(part2.gradB_epilogue));
printf(" gradQ_epilogue: %db\n", FSZ(part2.gradQ_epilogue));
printf(" part3: %db\n", FSZ(part3));
printf(" tmpT_shared_storage: %db\n", FSZ(part3.tmpT_shared_storage));
printf(" part4: %db\n", FSZ(part4));
printf(" mm_qk_q: %db\n", FSZ(part4.mm_qk_q));
printf(
" gradK_epilogue_final: %db\n", FSZ(part4.gradK_epilogue_final));
printf(
" gradV_epilogue_final: %db\n", FSZ(part4.gradV_epilogue_final));
}
// ===========================================
#define FIELD(INSIDE_STRUCT, FIELDNAME) \
CUTLASS_DEVICE auto& FIELDNAME() { \
return INSIDE_STRUCT.FIELDNAME; \
}
FIELD(persistent, di)
FIELD(persistent, mm_qk_k)
FIELD(part1, bias)
FIELD(part1, attn_shared_storage)
FIELD(part1, zij)
FIELD(part1, mm_gradV)
FIELD(part1, gradV_epilogue)
FIELD(part1, mm_doivj)
FIELD(part2, mm_gradK)
FIELD(part2, mm_gradQ)
FIELD(part2, gradB_epilogue)
FIELD(part2, gradQ_epilogue)
FIELD(part2, tmp_shared_storage)
FIELD(part3, tmpT_shared_storage)
FIELD(part3, gradQ_epilogue_lastIter)
FIELD(part3, gradK_epilogue)
FIELD(part4, mm_qk_q)
FIELD(part4, gradK_epilogue_final)
FIELD(part4, gradV_epilogue_final)
};
struct SharedStorageNoPrologue {
struct {
cutlass::Array<accum_t, kBlockSizeI> di; // (do_i * o_i).sum(-1)
} persistent;
union {
struct {
// part1 - Q.K matmul
typename MatmulQK::Mma::SharedStorageA mm_qk_k;
typename MatmulQK::Mma::SharedStorageB mm_qk_q;
} part1;
struct {
// part2 - compute gradV
union {
// 1. efficient load of bias tile Bij, which is then applied to Pij
typename MatmulQK::BiasLoader::SmemTile bias;
// 2. store Pij to shared memory. it is needed:
// - in this step, where it is used in dVj += (Pij.T * Zij) @ dOi
// - in next step where it is used in dSij = Pij * (dPij - Di)
typename MatmulQK::AccumulatorSharedStorage attn_shared_storage;
};
// 3. store Zij. it is needed in this step, where it is used
// to compute Pij_dropped = Pij * Zij on the fly as fragments of Pij are
// loaded for the computation of dVj.
ZijSharedStorage zij;
union {
typename MatmulGradV::Mma::SharedStorage mm_gradV;
typename MatmulGradV::DefaultEpilogue::SharedStorage gradV_epilogue;
};
} part2;
struct {
// part3 - DO.V matmul
union {
// first compute dPij = (dOi @ Vj.T) * Zij
// and dSij = Pij * (dPij - Di)
struct {
// (from part2) - Pij for computing dSij = Pij * (dPij - Di)
typename MatmulQK::AccumulatorSharedStorage attn_shared_storage;
// matmul to compute dOiVj
typename MatmulDOIVJ::Mma::SharedStorage mm_doivj;
};
// then store dB = dSij to global memory
typename MatmulDOIVJ::BiasGradEpilogue::SharedStorage gradB_epilogue;
};
} part3;
struct {
// part4 - compute gradQ
typename MatmulQK::AccumulatorSharedStorage
tmpT_shared_storage; // (from part2)
typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage;
union {
typename MatmulGradQ::Mma::SharedStorage mm_gradQ;
typename MatmulGradQ::DefaultEpilogue::SharedStorage gradQ_epilogue;
typename MatmulGradQ::DefaultEpilogue::SharedStorage
gradQ_epilogue_lastIter;
};
} part4;
struct {
// part5 - compute gradK
typename MatmulQK::AccumulatorSharedStorage
tmpT_shared_storage; // (from part2)
typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage;
union {
typename MatmulGradK::Mma::SharedStorage mm_gradK;
typename MatmulGradK::DefaultEpilogue::SharedStorage gradK_epilogue;
};
} part5;
struct {
// part6 - store RF accumulated into gmem
typename MatmulGradK::DefaultEpilogue::SharedStorage
gradK_epilogue_final;
typename MatmulGradV::DefaultEpilogue::SharedStorage
gradV_epilogue_final;
} part6;
};
static void print_size() {
#define FIELD_SIZEOF(f) int((sizeof(((SharedStorageNoPrologue*)0)->f)))
printf("Total smem: %d bytes\n", int(sizeof(SharedStorageNoPrologue)));
printf(" persistent: %db\n", FIELD_SIZEOF(persistent));
printf(" part1: %db\n", FIELD_SIZEOF(part1));
printf(" part2: %db\n", FIELD_SIZEOF(part2));
printf(" part3: %db\n", FIELD_SIZEOF(part3));
printf(" part4: %db\n", FIELD_SIZEOF(part4));
printf(" part5: %db\n", FIELD_SIZEOF(part5));
printf(" part6: %db\n", FIELD_SIZEOF(part6));
}
// ===========================================
#define FIELD(INSIDE_STRUCT, FIELDNAME) \
CUTLASS_DEVICE auto& FIELDNAME() { \
return INSIDE_STRUCT.FIELDNAME; \
}
FIELD(persistent, di)
FIELD(part1, mm_qk_k)
FIELD(part1, mm_qk_q)
FIELD(part2, bias)
FIELD(part2, attn_shared_storage)
FIELD(part2, zij)
FIELD(part2, mm_gradV)
FIELD(part2, gradV_epilogue)
FIELD(part3, mm_doivj)
FIELD(part3, gradB_epilogue)
FIELD(part4, tmpT_shared_storage)
FIELD(part4, tmp_shared_storage)
FIELD(part4, mm_gradQ)
FIELD(part4, gradQ_epilogue)
FIELD(part4, gradQ_epilogue_lastIter)
FIELD(part5, mm_gradK)
FIELD(part5, gradK_epilogue)
FIELD(part6, gradK_epilogue_final)
FIELD(part6, gradV_epilogue_final)
};
using SharedStorage = typename cutlass::platform::conditional<
kPreload,
SharedStoragePrologue,
SharedStorageNoPrologue>::type;
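  // Register-file accumulators for grad_k / grad_v. They are cleared once per
  // key block, accumulated across all query iterations for that key block,
  // and either written straight from registers (kOutputInRF) or staged
  // through the gmem workspace between query blocks.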
struct OutputFragments {
typename MatmulGradV::Mma::FragmentC gradV;
typename MatmulGradK::Mma::FragmentC gradK;
CUTLASS_DEVICE void clear() {
gradV.clear();
gradK.clear();
}
};
static bool __host__ check_supported(Params const& p) {
CHECK_ALIGNED_PTR(p.query_ptr, kMinimumAlignment);
CHECK_ALIGNED_PTR(p.key_ptr, kMinimumAlignment);
CHECK_ALIGNED_PTR(p.value_ptr, kMinimumAlignment);
CHECK_ALIGNED_PTR(p.output_ptr, kMinimumAlignment);
CHECK_ALIGNED_PTR(p.grad_output_ptr, kMinimumAlignment);
CHECK_ALIGNED_PTR(p.bias_ptr, kMinimumAlignment);
XFORMERS_CHECK(p.lse_strideH % 8 == 0, "LSE is not correctly aligned");
XFORMERS_CHECK(p.lse_strideB % 8 == 0, "LSE is not correctly aligned");
XFORMERS_CHECK(
p.num_heads <= 1 || p.q_strideH % kMinimumAlignment == 0,
"query is not correctly aligned (strideH)");
XFORMERS_CHECK(
p.num_heads <= 1 || p.k_strideH % kMinimumAlignment == 0,
"key is not correctly aligned (strideH)");
XFORMERS_CHECK(
p.num_heads <= 1 || p.v_strideH % kMinimumAlignment == 0,
"value is not correctly aligned (strideH)");
XFORMERS_CHECK(
p.num_batches <= 1 || p.q_strideB % kMinimumAlignment == 0,
"query is not correctly aligned (strideB)");
XFORMERS_CHECK(
p.num_batches <= 1 || p.k_strideB % kMinimumAlignment == 0,
"key is not correctly aligned (strideB)");
XFORMERS_CHECK(
p.num_batches <= 1 || p.v_strideB % kMinimumAlignment == 0,
"value is not correctly aligned (strideB)");
XFORMERS_CHECK(
p.q_strideM % kMinimumAlignment == 0,
"query is not correctly aligned (strideM)");
XFORMERS_CHECK(
p.k_strideM % kMinimumAlignment == 0,
"key is not correctly aligned (strideM)");
XFORMERS_CHECK(
p.v_strideM % kMinimumAlignment == 0,
"value is not correctly aligned (strideM)");
if (p.bias_ptr) {
XFORMERS_CHECK(
p.num_batches <= 1 || p.bias_strideB % kMinimumAlignment == 0,
"attn_bias is not correctly aligned (strideB)");
XFORMERS_CHECK(
p.num_heads <= 1 || p.bias_strideH % kMinimumAlignment == 0,
"attn_bias is not correctly aligned (strideH)");
XFORMERS_CHECK(
p.bias_strideM % kMinimumAlignment == 0,
"attn_bias is not correctly aligned (strideM)");
}
if (p.grad_bias_ptr) {
XFORMERS_CHECK(
p.num_batches <= 1 || p.gB_strideB % kMinimumAlignment == 0,
"attn_bias.grad is not correctly aligned (strideB)");
XFORMERS_CHECK(
p.num_heads <= 1 || p.gB_strideH % kMinimumAlignment == 0,
"attn_bias.grad is not correctly aligned (strideH)");
XFORMERS_CHECK(
p.gB_strideM % kMinimumAlignment == 0,
"attn_bias.grad is not correctly aligned (strideM)");
}
XFORMERS_CHECK(
!(p.cu_seqlens_q_ptr && p.bias_ptr),
"CuSeqlen + bias not implemented yet");
XFORMERS_CHECK(
p.custom_mask_type < NumCustomMaskTypes,
"Invalid value for `custom_mask_type`");
XFORMERS_CHECK(
p.dropout_prob <= 1.0f && p.dropout_prob >= 0.0f,
"Invalid value for `dropout_prob`");
XFORMERS_CHECK(
kApplyDropout || p.dropout_prob == 0.0f,
"Set `kApplyDropout`=True to support `dropout_prob > 0`");
XFORMERS_CHECK(p.head_dim > 0, "Invalid value for `head_dim`");
XFORMERS_CHECK(p.head_dim_value > 0, "Invalid value for `head_dim_value`");
XFORMERS_CHECK(p.num_queries > 0, "Invalid value for `num_queries`");
XFORMERS_CHECK(p.num_keys > 0, "Invalid value for `num_keys`");
XFORMERS_CHECK(p.num_heads > 0, "Invalid value for `num_heads`");
XFORMERS_CHECK(p.num_batches > 0, "Invalid value for `num_batches`");
    XFORMERS_CHECK(p.head_dim <= kMaxK, "kMaxK: Expected `head_dim <= kMaxK`");
    XFORMERS_CHECK(
        p.head_dim_value <= kMaxK,
        "kMaxK: Expected `head_dim_value <= kMaxK`");
if (kKeysQueriesAlignedToBlockSize) {
XFORMERS_CHECK(
p.cu_seqlens_k_ptr == nullptr,
"This kernel does not support cu_seqlen");
XFORMERS_CHECK(
p.cu_seqlens_q_ptr == nullptr,
"This kernel does not support cu_seqlen");
XFORMERS_CHECK(
p.num_queries % kBlockSizeI == 0,
"kKeysQueriesAlignedToBlockSize condition not respected");
XFORMERS_CHECK(
p.num_keys % kBlockSizeJ == 0,
"kKeysQueriesAlignedToBlockSize condition not respected");
}
XFORMERS_CHECK(
kEnableSplitKeys || p.num_splits_key == 1, "SplitKeys is disabled");
XFORMERS_CHECK(
p.num_splits_key > 0, "Invalid `num_splits_key` (expected >0)");
XFORMERS_CHECK(
p.num_splits_key <= cutlass::ceil_div(p.num_keys, kBlockSizeJ),
"Invalid `num_splits_key` (too large)");
return true;
}
static CUTLASS_DEVICE void attention_kernel(Params p) {
extern __shared__ char smem_buffer[];
SharedStorage& shared_storage = *((SharedStorage*)smem_buffer);
uint16_t thread_id = threadIdx.x;
uint8_t warp_id = warp_uniform(thread_id / 32);
uint8_t lane_id = thread_id % 32;
int32_t key_start = p.split_key_device() * kBlockSizeJ;
if (key_start >= p.num_keys) {
return;
}
if (kPrologueQK) {
int32_t query_start = getQueryStart(p, key_start);
prologueQkNextIteration<true>(
shared_storage, p, query_start, key_start, warp_id, lane_id);
}
// Computes (dO*out).sum(-1) and writes it to `p.delta_ptr`
if (kKernelComputesDelta) {
constexpr int kOptimalElements =
128 / cutlass::sizeof_bits<scalar_t>::value;
if (p.head_dim_value % kOptimalElements == 0) {
for (int query_start = 0; query_start < p.num_queries;
query_start += kBlockSizeI) {
computeDelta<kOptimalElements>(p, query_start, warp_id, lane_id);
}
} else {
for (int query_start = 0; query_start < p.num_queries;
query_start += kBlockSizeI) {
computeDelta<1>(p, query_start, warp_id, lane_id);
}
}
__syncthreads();
}
OutputFragments output_frags;
curandStatePhilox4_32_10_t rng_state_init;
#ifdef HAS_PYTORCH
if (kApplyDropout) {
auto seeds = at::cuda::philox::unpack(p.rng_engine_inputs);
      // Each element of the attention matrix P with shape
      // (batch_sz, n_heads, n_queries, n_keys) is associated with a single
      // offset in the RNG sequence. We initialize the RNG state with an
      // offset that starts at the beginning of the (n_queries, n_keys)
      // matrix for this block's batch_id and head_id.
      // Initializing the RNG state is very expensive, so we do it once per
      // kernel rather than once per iteration; each iteration takes a copy
      // of the initialized RNG state and offsets it as needed.
curand_init(
std::get<0>(seeds),
0,
std::get<1>(seeds) + p.dropout_batch_head_rng_offset,
&rng_state_init);
}
#endif
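    // The per-element RNG offset for attention entry (b, h, q, k) is
    //   b * (nH * Mq * Mk) + h * (Mq * Mk) + q * Mk + k.
    // The (b, h) part was folded into `dropout_batch_head_rng_offset` in
    // `advance_to_block`; the remaining `q * Mk + k` part is applied later
    // with `skipahead` when each thread generates its slice of Zij.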
CUTLASS_PRAGMA_UNROLL
for (; key_start < p.num_keys;
key_start += p.num_splits_key_device() * kBlockSizeJ) {
output_frags.clear();
CUTLASS_PRAGMA_UNROLL
for (int32_t query_start_shifted = getQueryStart(p, key_start);
query_start_shifted < getQueryStartShift(p) + getQueryEnd(p);
query_start_shifted += kBlockSizeI) {
// This line here
// vvvvvvvvvvvvvv
warp_id = warp_uniform(warp_id);
// ^^^^^^^^^^^^^^
// ... makes everything use less RF and be 10% faster. Why?
// I don't know. My theory is that it forces `nvcc` to
// re-compute indices, offsets etc... and not keep them
// from the previous iteration, which prevents MASSIVE
// register spilling.
int32_t query_start = query_start_shifted;
if (query_start >= p.num_queries) {
query_start = query_start % getQueryEnd(p);
}
processBlockIJ<kKeysQueriesAlignedToBlockSize>(
shared_storage,
output_frags,
p,
query_start,
key_start,
rng_state_init,
warp_id,
lane_id);
}
if (kOutputInRF) {
writeFragsToGmem<kKeysQueriesAlignedToBlockSize>(
shared_storage, output_frags, p, key_start, warp_id, lane_id);
} else if (getQueryStart(p, key_start) >= p.num_queries) {
zfillGradKV<kKeysQueriesAlignedToBlockSize>(
p, key_start, warp_id, lane_id);
}
__syncthreads();
}
}
template <bool skipBoundsChecks>
static CUTLASS_DEVICE void zfillGradKV(
Params const& p,
int32_t key_start,
uint8_t warp_id,
uint8_t lane_id) {
constexpr int kThreadsPerKey = 8;
constexpr int kParallelKeys = kNumThreads / kThreadsPerKey;
    static_assert(
        kBlockSizeJ % kParallelKeys == 0,
        "kBlockSizeJ must be a multiple of kParallelKeys");
// This function is not really optimized, but should rarely be used
// It's only used when some keys are "useless" and don't attend to
// any query, due to causal masking
int thread_id = 32 * warp_id + lane_id;
int k_shift = lane_id % kThreadsPerKey;
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kBlockSizeJ; j += kParallelKeys) {
int key = key_start + j + (thread_id / kThreadsPerKey);
if (!skipBoundsChecks && key >= p.num_keys) {
continue;
}
auto gv_ptr = p.grad_value_ptr + key * p.gV_strideM();
auto gk_ptr = p.grad_key_ptr + key * p.gK_strideM();
for (int k = k_shift; k < p.head_dim_value; k += kThreadsPerKey) {
gv_ptr[k] = scalar_t(0);
}
for (int k = k_shift; k < p.head_dim; k += kThreadsPerKey) {
gk_ptr[k] = scalar_t(0);
}
}
}
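  // Processes one (query block i, key block j) pair of the backward pass:
  //   1. Pij.T  = exp(scale * Kj @ Qi.T + Bij.T - LSE_i)     -> shmem
  //   2. Zij    = dropout mask, scaled by 1 / (1 - p)        -> shmem (optional)
  //   3. dVj   += (Pij.T [* Zij]) @ dOi
  //   4. dPij   = (dOi @ Vj.T) [* Zij];  dSij = Pij * (dPij - Di)
  //      (dBij = dSij is also written to grad_bias when requested)
  //   5. dQi   += (scale * dSij) @ Kj
  //   6. dKj   += (scale * dSij).T @ Qi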
template <bool skipBoundsChecks>
static CUTLASS_DEVICE void processBlockIJ(
SharedStorage& shared_storage,
OutputFragments& output_frags,
Params& p,
int32_t query_start,
int32_t key_start,
const curandStatePhilox4_32_10_t& curand_state_init,
uint8_t warp_id,
uint8_t lane_id) {
cutlass::Array<cutlass::uint1b_t, MatmulDOIVJ::Mma::FragmentC::kElements>
dropout_keep_mask_doivj;
dropout_keep_mask_doivj.fill(cutlass::uint1b_t{1});
const float dropout_scale =
kApplyDropout ? 1.0 / (1.0 - p.dropout_prob) : 1.0f;
cutlass::MatrixCoord no_offset{0, 0};
accum_t scale = p.scale;
int16_t thread_id = 32 * warp_id + lane_id;
auto rematerializeThreadIds = [&]() {
// Prevents `nvcc` from keeping values deduced from
// `thread_id`, `warp_id`, ... in RF - to reduce register pressure
warp_id = warp_uniform(thread_id / 32);
lane_id = thread_id % 32;
thread_id = 32 * warp_id + lane_id;
};
bool isFirstQuery = (query_start == getQueryStart(p, key_start));
int32_t next_query, next_key;
incrIteration(p, query_start, key_start, next_query, next_key);
bool isLastQuery = next_key != key_start;
accum_t di_rf = accum_t(0);
if (thread_id < kBlockSizeI) {
if (query_start + thread_id < p.num_queries) {
di_rf = p.delta_ptr[query_start + thread_id];
}
shared_storage.di()[thread_id] = di_rf;
}
int32_t num_queries_in_block = skipBoundsChecks
? MatmulQK::Mma::Shape::kN
: warp_uniform(cutlass::fast_min(
(int32_t)MatmulQK::Mma::Shape::kN, p.num_queries - query_start));
int32_t num_keys_in_block = skipBoundsChecks
? MatmulQK::Mma::Shape::kM
: warp_uniform(cutlass::fast_min(
(int32_t)MatmulQK::Mma::Shape::kM, p.num_keys - key_start));
auto prologueGradV = [&](int col) {
typename MatmulGradV::Mma::IteratorB iterator_dO(
{int32_t(p.gO_strideM)},
p.grad_output_ptr + query_start * p.gO_strideM + col,
{num_queries_in_block, p.head_dim_value - col},
thread_id,
no_offset);
MatmulGradV::Mma::prologue(
shared_storage.mm_gradV(),
iterator_dO,
thread_id,
num_queries_in_block);
};
auto prologueGradQ = [&](int col) {
typename MatmulGradQ::Mma::IteratorB iterator_K(
{int32_t(p.k_strideM)},
p.key_ptr + key_start * p.k_strideM + col,
{num_keys_in_block, p.head_dim - col},
thread_id,
no_offset);
MatmulGradQ::Mma::prologue(
shared_storage.mm_gradQ(), iterator_K, thread_id, num_keys_in_block);
};
auto prologueGradK = [&](int col) {
typename MatmulGradK::Mma::IteratorB iterator_Q(
{int32_t(p.q_strideM)},
p.query_ptr + query_start * p.q_strideM + col,
{num_queries_in_block, p.head_dim - col},
thread_id,
no_offset);
MatmulGradK::Mma::prologue(
shared_storage.mm_gradK(),
iterator_Q,
thread_id,
num_queries_in_block);
};
auto prologueDOV = [&]() {
typename MatmulDOIVJ::Mma::IteratorA iterator_A(
{int32_t(p.gO_strideM)},
p.grad_output_ptr + query_start * p.gO_strideM,
{num_queries_in_block, p.head_dim_value},
thread_id,
no_offset);
typename MatmulDOIVJ::Mma::IteratorB iterator_B(
{int32_t(p.v_strideM)},
p.value_ptr + key_start * p.v_strideM,
{p.head_dim_value, num_keys_in_block},
thread_id,
no_offset);
MatmulDOIVJ::Mma::prologue(
shared_storage.mm_doivj(),
iterator_A,
iterator_B,
thread_id,
p.head_dim_value);
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// MatmulQK
/////////////////////////////////////////////////////////////////////////////////////////////////
{
using Mma = typename MatmulQK::Mma;
cutlass::gemm::GemmCoord problem_size(
num_keys_in_block,
num_queries_in_block,
p.head_dim // k
);
// k_j
typename Mma::IteratorA iterator_A(
{int32_t(p.k_strideM)},
p.key_ptr + key_start * p.k_strideM,
{problem_size.m(), problem_size.k()},
thread_id,
no_offset);
// q_i.transpose(-2, -1)
typename Mma::IteratorB iterator_B(
{int32_t(p.q_strideM)},
p.query_ptr + query_start * p.q_strideM,
{problem_size.k(), problem_size.n()},
thread_id,
no_offset);
Mma mma(
shared_storage.mm_qk_k(),
shared_storage.mm_qk_q(),
thread_id,
warp_id,
lane_id);
typename Mma::FragmentC accum;
accum.clear();
auto gemm_k_iterations =
(problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma.set_prologue_done(kPrologueQK);
mma.set_zero_outside_bounds(!skipBoundsChecks);
mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
accum = cutlass::multiplies<typename Mma::FragmentC>()(scale, accum);
// Epilogue: add LSE + exp and store that to our shared memory buffer
// shmem <- (matmul_result -
// logsumexp[i_start:i_end].unsqueeze(1)).exp()
int warp_idx_mn_0 =
warp_id % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN);
auto output_tile_coords = cutlass::MatrixCoord{
warp_idx_mn_0 % Mma::Base::WarpCount::kM,
warp_idx_mn_0 / Mma::Base::WarpCount::kM};
// apply bias if applicable
if (p.bias_ptr != nullptr) {
// load bias tile Bij into shared memory
typename MatmulQK::BiasLoader::GmemTileIterator bias_iter(
{cutlass::layout::RowMajor(p.bias_strideM)},
p.bias_ptr + query_start * p.bias_strideM + key_start,
{num_queries_in_block, num_keys_in_block},
thread_id);
cutlass::TensorRef<scalar_t, cutlass::layout::RowMajor> bias_tensor_ref(
shared_storage.bias().data(),
cutlass::layout::RowMajor(MatmulQK::ThreadblockShape::kM));
typename MatmulQK::BiasLoader::SmemTileIterator smem_tile_iter(
bias_tensor_ref, thread_id);
MatmulQK::BiasLoader::load(bias_iter, smem_tile_iter);
// Pij += Bij, where Pij is in register fragment and Bij is in shmem
auto lane_offset = MatmulQK::AccumLambdaIterator::get_lane_offset(
lane_id, warp_id, output_tile_coords);
MatmulQK::AccumLambdaIterator::iterateRows(
lane_offset,
[&](int accum_n) {},
[&](int accum_m, int accum_n, int idx) {
// remember we are transposed
accum[idx] += bias_tensor_ref.at({accum_n, accum_m});
},
[&](int accum_n) {});
}
// Apply mask
if (p.custom_mask_type == CausalFromTopLeft ||
p.custom_mask_type == CausalFromBottomRight) {
auto lane_offset = MatmulQK::AccumLambdaIterator::get_lane_offset(
lane_id, warp_id, output_tile_coords);
int shift = query_start - key_start;
if (p.custom_mask_type == CausalFromBottomRight) {
shift += p.num_keys - p.num_queries;
}
// current_key = key_start + accum_m
// current_query = query_start + accum_n
// mask if: `current_key > current_query`
MatmulQK::AccumLambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {},
[&](int accum_m, int accum_n, int idx) {
if (accum_m > accum_n + shift) {
accum[idx] =
-cutlass::platform::numeric_limits<accum_t>::infinity();
}
},
[&](int accum_m) {});
}
__syncthreads();
if (kPrologueGV) {
prologueGradV(0);
}
if (kPrologueDOV) {
prologueDOV();
}
MatmulQK::B2bGemm::accumApplyLSEToSmem(
shared_storage.attn_shared_storage(),
accum,
p.logsumexp_ptr + query_start,
problem_size.n(),
thread_id,
warp_id,
lane_id,
output_tile_coords);
#if 0
auto accum_ref_attnT = shared_storage.attn_shared_storage().accum_ref();
PRINT_TENSOR4x4_T0_L0("attn_T", accum_ref_attnT);
#endif
// if we are using dropout, compute Zij, writing it to shared memory.
// each element of Zij is:
// - 0 with probability dropout_p
// - 1 / (1 - dropout_p) with probability 1 - dropout_p
if (kApplyDropout) {
auto zij = shared_storage.zij().accum_ref();
// each thread generates a contiguous sequence of elements in Zij, all
// in the same row. the reason they have to come from the same row is
// that sampling random numbers from a contiguous random number sequence
// is much more efficient than jumping around, and the linear offset of
// each element of Z (the global matrix) maps to an offset in a random
// number sequence. for Z, the end of a row and the beginning of the
// next have adjacent offsets, but for Zij (tile of global matrix), this
// is not necessarily the case.
// We must fill the entire `zij` shmem with values (even out of bounds
// on the K-dimension) otherwise we can get NaNs during the GEMM
const int kQueriesPerBlock = kBlockSizeI;
const int threads_per_row = cutlass::fast_min(
int32_t(kNumThreads / kQueriesPerBlock), num_keys_in_block);
const int elts_per_thread = cutlass::round_nearest(
cutlass::ceil_div(num_keys_in_block, threads_per_row), 4);
const int thread_i = thread_id / threads_per_row;
const int thread_start_j =
(thread_id % threads_per_row) * elts_per_thread;
if (thread_i < kQueriesPerBlock && thread_start_j < num_keys_in_block) {
curandStatePhilox4_32_10_t curand_state = curand_state_init;
skipahead(
(query_start + thread_i) * p.num_keys +
(key_start + thread_start_j),
&curand_state);
// generate elements of Zij, 4 elements at a time
for (int zij_start_col_idx = thread_start_j; zij_start_col_idx <
cutlass::fast_min<int32_t>(thread_start_j + elts_per_thread,
num_keys_in_block);
zij_start_col_idx += 4) {
const float4 rand_uniform_quad = curand_uniform4(&curand_state);
CUTLASS_PRAGMA_UNROLL
for (int quad_idx = 0; quad_idx < 4; ++quad_idx) {
// we'll write Zij transposed since attention is also transposed
// during the matmul to compute dV.
zij.at({zij_start_col_idx + quad_idx /*k*/, thread_i /*q*/}) =
(&rand_uniform_quad.x)[quad_idx] > p.dropout_prob
? scalar_t(dropout_scale)
: scalar_t(0);
}
}
}
__syncthreads();
#if 0
PRINT_TENSOR4x4_T0_L0("zij", zij);
PRINT_TENSOR4x4_T0_L0_START("zij", zij, kBlockSizeJ - 4, kBlockSizeI - 4);
#endif
// Save mask for later DOIVJ matmul
int warp_idx_mn_0 = warp_id %
(MatmulDOIVJ::Mma::Base::WarpCount::kM *
MatmulDOIVJ::Mma::Base::WarpCount::kN);
auto output_tile_coords_doivj = cutlass::MatrixCoord{
warp_idx_mn_0 % MatmulDOIVJ::Mma::Base::WarpCount::kM,
warp_idx_mn_0 / MatmulDOIVJ::Mma::Base::WarpCount::kM};
auto lane_offset = MatmulDOIVJ::AccumLambdaIterator::get_lane_offset(
lane_id, warp_id, output_tile_coords_doivj);
MatmulDOIVJ::AccumLambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {},
[&](int accum_m /*q*/, int accum_n /*k*/, int idx) {
if (zij.at({accum_n, accum_m}) == scalar_t(0)) {
dropout_keep_mask_doivj[idx] = cutlass::uint1b_t{0};
}
},
[&](int accum_m) {});
}
__syncthreads();
}
rematerializeThreadIds();
/////////////////////////////////////////////////////////////////////////////////////////////////
// GradV matmul
//
// grad_v[j_start:j_end] += attn_T @ do_i
/////////////////////////////////////////////////////////////////////////////////////////////////
constexpr bool kSingleIterationGradV =
kMaxK <= MatmulGradV::ThreadblockShape::kN;
for (int col = 0; col < (kSingleIterationGradV ? 1 : p.head_dim_value);
col += MatmulGradV::ThreadblockShape::kN) {
using Mma = typename MatmulGradV::Mma;
using AccumTileGmem = typename MatmulGradQ::AccumTileGmem;
cutlass::gemm::GemmCoord problem_size(
num_keys_in_block, p.head_dim_value - col, num_queries_in_block);
auto createEpilogueIter = [&]() {
return typename MatmulGradV::OutputTileIterator(
typename MatmulGradV::OutputTileIterator::Params{p.gV_strideM()},
p.grad_value_ptr + key_start * p.gV_strideM() + col,
{num_keys_in_block, p.head_dim_value - col},
thread_id);
};
typename Mma::IteratorB iterator_B(
{int32_t(p.gO_strideM)},
p.grad_output_ptr + query_start * p.gO_strideM + col,
{num_queries_in_block, p.head_dim_value - col},
thread_id,
no_offset);
// if dropout: dVj += (Pij.T * Zij) @ dOi
// otherwise: dVj += Pij.T @ dOi
Mma mma(
// operand A: Pij.T
shared_storage.attn_shared_storage().accum_ref(),
// operand A_scale Zij.T:
// if we're using dropout, operand A is Pij_dropped.T = Pij.T * Zij.T
// which is computed on the fly as fragments of Pij.T are loaded in
shared_storage.zij().accum_ref(),
        // operand B: dOi - staged into shared memory by the GradV prologue
        // (or loaded by this mma if the prologue did not run ahead of time)
shared_storage.mm_gradV().operand_B_ref(),
thread_id,
warp_id,
lane_id);
int storage_id = col / MatmulGradV::ThreadblockShape::kN;
AccumTileGmem gmem_tile{
p.workspace_gv + storage_id * AccumTileGmem::kElementsStored};
if (!kOutputInRF) {
if (isFirstQuery || !kNeedsAccumGradV) {
output_frags.gradV.clear();
} else {
gmem_tile.load(output_frags.gradV, thread_id);
}
}
mma.set_prologue_done(kPrologueGV);
auto gemm_k_iterations =
(problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
__syncthreads();
mma(gemm_k_iterations,
output_frags.gradV,
iterator_B,
output_frags.gradV);
__syncthreads();
if (kPrologueGV && !kSingleIterationGradV &&
col + MatmulGradV::ThreadblockShape::kN < p.head_dim_value) {
prologueGradV(col + MatmulGradV::ThreadblockShape::kN);
}
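      // If dVj is not kept in registers across iterations (!kOutputInRF),
      // partial results are either parked in the gmem workspace between
      // query blocks (when extra accumulation precision is needed) or
      // accumulated directly into grad_value.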
if (!kOutputInRF) {
if (kNeedsAccumGradV && !isLastQuery) {
gmem_tile.store(output_frags.gradV, thread_id);
} else {
accumulateInGmem<MatmulGradV>(
shared_storage.gradV_epilogue(),
output_frags.gradV,
createEpilogueIter(),
isFirstQuery || kNeedsAccumGradV,
warp_id,
lane_id);
}
}
}
__syncthreads();
/////////////////////////////////////////////////////////////////////////////////////////////////
// MatmulDOIVJ
/////////////////////////////////////////////////////////////////////////////////////////////////
{
using Mma = typename MatmulDOIVJ::Mma;
// do_i
typename Mma::IteratorA iterator_A(
{int32_t(p.gO_strideM)},
p.grad_output_ptr + query_start * p.gO_strideM,
{num_queries_in_block, p.head_dim_value},
thread_id,
no_offset);
// v_j.transpose(-2, -1)
typename Mma::IteratorB iterator_B(
{int32_t(p.v_strideM)},
p.value_ptr + key_start * p.v_strideM,
{p.head_dim_value, num_keys_in_block},
thread_id,
no_offset);
Mma mma(shared_storage.mm_doivj(), thread_id, warp_id, lane_id);
mma.set_prologue_done(kPrologueDOV);
mma.set_zero_outside_bounds(!skipBoundsChecks);
typename Mma::FragmentC accum;
accum.clear();
auto gemm_k_iterations =
(p.head_dim_value + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
__syncthreads();
if (kPrologueGQ) {
prologueGradQ(0);
}
if (kPrologueGK) {
prologueGradK(0);
}
int warp_idx_mn_0 =
warp_id % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN);
auto output_tile_coords = cutlass::MatrixCoord{
warp_idx_mn_0 % Mma::Base::WarpCount::kM,
warp_idx_mn_0 / Mma::Base::WarpCount::kM};
// TODO: This must be terribly inefficient. There must be a better way
// tmp [RF] <- (accum [RF] - Di [smem] ) * attn_T.T [smem]
// attn_shared_storage [smem] <- tmp.T
// tmp_shared_storage [smem] <- tmp
{
using LambdaIterator = typename MatmulDOIVJ::AccumLambdaIterator;
auto lane_offset = LambdaIterator::get_lane_offset(
lane_id, warp_id, output_tile_coords);
// if dropout was used, compute dPij = dPij_dropped * Zij
if (kApplyDropout) {
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {},
[&](int accum_m, int accum_n, int idx) {
if (dropout_keep_mask_doivj[idx].get()) {
accum[idx] *= dropout_scale;
} else {
accum[idx] = 0;
}
},
[&](int accum_m) {});
}
auto attn_T = shared_storage.attn_shared_storage().accum_ref();
#if 0
PRINT_B0_T0("doivj_dropped");
print_warp_accum<LambdaIterator>(accum, lane_offset, 4, 4);
PRINT_TENSOR4x4_T0_L0("attn_T", attn_T)
#endif
accum_t current_di;
// dSij = (dPij - Di) * Pij
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) { current_di = shared_storage.di()[accum_m]; },
[&](int accum_m, int accum_n, int idx) {
            // TODO: Otherwise we can get NaNs, as we
            // might have infs here (only seen on f16 though)
if (skipBoundsChecks ||
(accum_m < num_queries_in_block &&
accum_n < num_keys_in_block)) {
accum_t attn = attn_T.at({accum_n, accum_m});
accum[idx] = (accum[idx] - current_di) * attn;
} else {
accum[idx] = 0;
}
},
[&](int accum_m) {
});
// store bias gradient tile dBij to global memory,
// where dBij = dSij = Pij * (dPij - Di)
if (p.grad_bias_ptr != nullptr) {
typename MatmulDOIVJ::BiasGradEpilogue::OutputTileIterator
output_iter(
typename MatmulDOIVJ::BiasGradEpilogue::OutputTileIterator::
Params{p.gB_strideM},
// grad_bias_ptr is offset to point at beginning of
// matrix of shape (queries, keys) for a given
// (batch_id, head_id) the pointer arithmetic here produces
// a pointer to the start of the current tile within that
// matrix
p.grad_bias_ptr + query_start * p.gB_strideM + key_start,
{num_queries_in_block, num_keys_in_block},
thread_id);
// no-op epilogue operator - just casting and storing contents of
// accum to global memory
typename MatmulDOIVJ::BiasGradEpilogue::OutputOp output_op({1, 1});
typename MatmulDOIVJ::BiasGradEpilogue epilogue(
shared_storage.gradB_epilogue(), thread_id, warp_id, lane_id);
epilogue(output_op, output_iter, accum, output_iter);
}
accum = accum * scale;
#if 0
PRINT_B0_T0("(doivj - di) * attn * scale");
print_warp_accum<LambdaIterator>(accum, lane_offset, 4, 4);
#endif
__syncthreads();
if (!MatmulGradK::DefaultMmaFromSmem::kIsTransposedA) {
auto tmpT = shared_storage.tmpT_shared_storage().accum_ref();
// attn <- attn_T.T
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {},
[&](int accum_m, int accum_n, int idx) {
tmpT.at({accum_n, accum_m}) = scalar_t(accum[idx]);
},
[&](int accum_m) {});
}
}
MatmulDOIVJ::B2bGemm::accumToSmem(
shared_storage.tmp_shared_storage(),
accum,
lane_id,
output_tile_coords);
__syncthreads();
}
// Force `nvcc` to recompute values that depend on the variables just below
// to use less RF and prevent some spilling
p.head_dim = warp_uniform(p.head_dim);
p.k_strideM = warp_uniform(p.k_strideM);
rematerializeThreadIds();
/////////////////////////////////////////////////////////////////////////////////////////////////
// GradQ matmul
//
// grad_q[i_start:i_end] += tmp @ k_j
/////////////////////////////////////////////////////////////////////////////////////////////////
// Skip the loop & associated branches if we know at compile time the number
// of iterations
constexpr bool kSingleIterationGradQ =
kMaxK <= MatmulGradQ::ThreadblockShape::kN;
for (int col = 0; col < (kSingleIterationGradQ ? 1 : p.head_dim);
col += MatmulGradQ::ThreadblockShape::kN) {
using Mma = typename MatmulGradQ::Mma;
using AccumTileGmem = typename MatmulGradQ::AccumTileGmem;
cutlass::gemm::GemmCoord problem_size(
num_queries_in_block,
false ? MatmulGradQ::ThreadblockShape::kN : p.head_dim - col,
num_keys_in_block);
// k_j
typename Mma::IteratorB iterator_B(
{int32_t(p.k_strideM)},
p.key_ptr + key_start * p.k_strideM + col,
{problem_size.k(), problem_size.n()},
thread_id,
no_offset);
Mma mma(
// operand A: dSij
shared_storage.tmp_shared_storage().accum_ref(),
// operand B: Kj
shared_storage.mm_gradQ().operand_B_ref(),
thread_id,
warp_id,
lane_id);
typename Mma::FragmentC accum;
int col_id = col / MatmulGradQ::ThreadblockShape::kN;
int num_cols = kSingleIterationGradQ
? 1
: ceil_div(p.head_dim, MatmulGradQ::ThreadblockShape::kN);
int storage_id = (col_id + query_start / kBlockSizeI * num_cols);
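      // One GradQTempStorage slot per (query block, head_dim column tile);
      // every key block that contributes to this grad_q tile serializes on
      // its lock when split-keys are enabled.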
if (p.num_splits_key_device() > 1) {
AtomicLock::acquire(
&p.workspace_gq[storage_id].lock,
p.split_key_device() + 1,
thread_id);
// Make sure we can see other block's output
__threadfence();
}
AccumTileGmem gmem_tile{&p.workspace_gq[storage_id].buffer[0]};
if (!kNeedsAccumGradQ ||
(p.num_splits_key_device() == 1 && key_start == 0)) {
// if we know we are the first to access it, we know it's only zeros.
// Avoids a load from gmem (and gmem init as well)
accum.clear();
} else {
gmem_tile.load(accum, thread_id);
}
auto gemm_k_iterations =
(problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
__syncthreads();
mma.set_prologue_done(kPrologueGQ);
mma(gemm_k_iterations, accum, iterator_B, accum);
__syncthreads();
bool isLastColumn = kSingleIterationGradQ ||
(col + MatmulGradQ::ThreadblockShape::kN >= p.head_dim);
if (kPrologueGQ && !isLastColumn) {
prologueGradQ(col + MatmulGradQ::ThreadblockShape::kN);
}
bool isLast = [&]() {
int32_t next_key = key_start + p.num_splits_key_device() * kBlockSizeJ;
if (p.num_keys <= next_key) {
return true;
}
if (query_start < getSmallestQueryForKey(p, next_key)) {
return true;
}
return false;
}();
// Output results
if (p.num_splits_key_device() > 1) {
int32_t numAddsSoFar = -1;
if (isLast && thread_id == 0) {
numAddsSoFar = atomicAdd(&p.workspace_gq[storage_id].counter, 1) +
1; // `atomicAdd` returns the old value
}
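        // Broadcast the decision: if thread 0 saw that all contributing key
        // blocks have arrived, every thread finalizes into grad_query instead
        // of storing the partial accumulator back to the workspace.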
isLast = __syncthreads_or(
numAddsSoFar == getNumParallelBlocksForQuery(p, query_start));
assert(numAddsSoFar <= getNumParallelBlocksForQuery(p, query_start));
}
if (kNeedsAccumGradQ && !isLast) {
gmem_tile.store(accum, thread_id);
if (p.num_splits_key_device() > 1) {
// Make sure everyone wrote before we release the lock
__threadfence();
__syncthreads();
AtomicLock::release(&p.workspace_gq[storage_id].lock, thread_id);
}
} else {
// NOTE: We're not releasing the lock because no one is expected
// to come after us (we're the last one to write)
typename MatmulGradQ::OutputTileIterator output_it(
typename MatmulGradQ::OutputTileIterator::Params{p.gQ_strideM()},
p.grad_query_ptr + query_start * p.gQ_strideM() + col,
{problem_size.m(), problem_size.n()},
thread_id);
bool storage_contains_zeros = kNeedsAccumGradQ || key_start == 0 ||
(p.num_splits_key_device() > 1);
accumulateInGmem<MatmulGradQ>(
isLastColumn ? shared_storage.gradQ_epilogue_lastIter()
: shared_storage.gradQ_epilogue(),
accum,
output_it,
storage_contains_zeros,
warp_id,
lane_id);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// GradK matmul
//
// grad_k[i_start:i_end] += tmp.transpose(-2, -1) @ q_i
/////////////////////////////////////////////////////////////////////////////////////////////////
rematerializeThreadIds();
constexpr bool kSingleIterationGradK =
kMaxK <= MatmulGradK::ThreadblockShape::kN;
for (int col = 0; col < (kSingleIterationGradK ? 1 : p.head_dim);
col += MatmulGradK::ThreadblockShape::kN) {
using Mma = typename MatmulGradK::Mma;
using AccumTileGmem = typename MatmulGradQ::AccumTileGmem;
cutlass::gemm::GemmCoord problem_size(
num_keys_in_block,
false ? MatmulGradK::ThreadblockShape::kN : p.head_dim - col,
num_queries_in_block);
auto createEpilogueIter = [&]() {
return typename MatmulGradK::OutputTileIterator(
typename MatmulGradK::OutputTileIterator::Params{p.gK_strideM()},
p.grad_key_ptr + key_start * p.gK_strideM() + col,
{num_keys_in_block,
false ? MatmulGradK::ThreadblockShape::kN : p.head_dim - col},
thread_id);
};
// q_i
typename Mma::IteratorB iterator_B(
{int32_t(p.q_strideM)},
p.query_ptr + query_start * p.q_strideM + col,
{problem_size.k(), problem_size.n()},
thread_id,
no_offset);
auto getTmp = [&](int) { return &shared_storage.tmp_shared_storage(); };
auto getTmpT = [&](int) { return &shared_storage.tmpT_shared_storage(); };
// this is basically:
// opA = kIsTransposedA ? getTmp() : getTmpT();
bool constexpr kIsTransposedA =
MatmulGradK::DefaultMmaFromSmem::kIsTransposedA;
auto& opA = *call_conditional<
kIsTransposedA,
decltype(getTmp),
decltype(getTmpT)>::apply(getTmp, getTmpT, 0);
Mma mma(
// operand A: dSij.T
opA.accum_ref(),
// operand B: Qi
shared_storage.mm_gradK().operand_B_ref(),
thread_id,
warp_id,
lane_id);
int storage_id = col / MatmulGradK::ThreadblockShape::kN;
AccumTileGmem gmem_tile{
p.workspace + storage_id * AccumTileGmem::kElementsStored};
if (!kOutputInRF) {
if (isFirstQuery || !kNeedsAccumGradK) {
output_frags.gradK.clear();
} else {
gmem_tile.load(output_frags.gradK, thread_id);
}
}
mma.set_prologue_done(kPrologueGK);
auto gemm_k_iterations =
(problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
__syncthreads();
mma(gemm_k_iterations,
output_frags.gradK,
iterator_B,
output_frags.gradK);
__syncthreads();
bool isLastColumn = kSingleIterationGradK ||
col + MatmulGradK::ThreadblockShape::kN >= p.head_dim;
if (kPrologueGK && !isLastColumn) {
prologueGradK(col + MatmulGradK::ThreadblockShape::kN);
}
if (kPrologueQK && isLastColumn) {
int32_t next_query, next_key;
incrIteration(p, query_start, key_start, next_query, next_key);
DISPATCH_BOOL(
next_key != key_start, kForceReloadK, ([&]() {
prologueQkNextIteration<kForceReloadK>(
shared_storage, p, next_query, next_key, warp_id, lane_id);
}));
}
// Output results
if (!kOutputInRF) {
if (kNeedsAccumGradK && !isLastQuery) {
gmem_tile.store(output_frags.gradK, thread_id);
} else {
accumulateInGmem<MatmulGradK>(
isLastColumn ? shared_storage.gradK_epilogue_final()
: shared_storage.gradK_epilogue(),
output_frags.gradK,
createEpilogueIter(),
isFirstQuery || kNeedsAccumGradK,
warp_id,
lane_id);
__syncthreads();
}
}
}
}
static CUTLASS_DEVICE int32_t getQueryStartShift(Params const& p) {
if (p.custom_mask_type == NoCustomMask && p.num_splits_key_device() > 1) {
return (p.split_key_device() * kBlockSizeI) % getQueryEnd(p);
}
return 0;
}
// Iteration order logic
static CUTLASS_DEVICE int32_t
getQueryStart(Params const& p, int32_t key_start) {
return getSmallestQueryForKey(p, key_start) + getQueryStartShift(p);
};
static CUTLASS_DEVICE int32_t getQueryEnd(Params const& p) {
return align_up(p.num_queries, kBlockSizeI);
};
static CUTLASS_DEVICE int32_t
getSmallestQueryForKey(Params const& p, int32_t key_start) {
if (p.custom_mask_type == CausalFromTopLeft) {
return (key_start / kBlockSizeI) * kBlockSizeI;
} else if (p.custom_mask_type == CausalFromBottomRight) {
int first_query =
cutlass::fast_max(0, key_start - p.num_keys + p.num_queries);
return (first_query / kBlockSizeI) * kBlockSizeI;
}
return 0;
};
// Returns how many kernel blocks will write to a given block in `grad_query`
// This is usually equal to the number of key splits, but can be different
// for instance in the causal case, or varying seqlen
static CUTLASS_DEVICE int32_t
getNumParallelBlocksForQuery(Params const& p, int32_t query_start) {
int16_t num_key_blocks = ceil_div(p.num_keys, kBlockSizeJ);
if (p.custom_mask_type == CausalFromTopLeft) {
int32_t last_key_for_block = query_start + kBlockSizeI - 1;
last_key_for_block = cutlass::fast_min(last_key_for_block, p.num_keys);
num_key_blocks = ceil_div(last_key_for_block, kBlockSizeJ);
} else if (p.custom_mask_type == CausalFromBottomRight) {
int32_t last_key_for_block =
query_start + (kBlockSizeI - 1) + (1 + p.num_keys - p.num_queries);
last_key_for_block = cutlass::fast_min(last_key_for_block, p.num_keys);
num_key_blocks = ceil_div(last_key_for_block, kBlockSizeJ);
}
return cutlass::fast_min(p.num_splits_key_device(), num_key_blocks);
};
// Returns the next block to process
static CUTLASS_DEVICE void incrIteration(
Params const& p,
int32_t query_start,
int32_t key_start,
int32_t& next_query,
int32_t& next_key) {
next_query = query_start + kBlockSizeI;
next_key = key_start;
auto query_shift = getQueryStartShift(p);
// Wrap around
if (query_shift) {
if (next_query >= p.num_queries) {
next_query = getSmallestQueryForKey(p, key_start);
return;
} else if (query_start < query_shift && query_shift <= next_query) {
// jump to next key
} else {
return;
}
} else {
if (next_query < p.num_queries) {
return;
}
// jump to next key
}
// Next key
next_key = key_start + p.num_splits_key_device() * kBlockSizeJ;
next_query = getQueryStart(p, next_key);
}
template <bool kForceReloadK>
static CUTLASS_DEVICE void prologueQkNextIteration(
SharedStorage& shared_storage,
Params const& p,
int32_t query_start,
int32_t key_start,
uint8_t warp_id,
uint8_t lane_id) {
if (query_start >= p.num_queries || key_start >= p.num_keys) {
return;
}
static constexpr bool kReloadK =
kForceReloadK || !MatmulQK::Mma::kSmemContainsEntireMat;
int thread_id = 32 * warp_id + lane_id;
typename MatmulQK::Mma::IteratorA iterator_A(
{int32_t(p.k_strideM)},
p.key_ptr + key_start * p.k_strideM,
{p.num_keys - key_start, p.head_dim},
thread_id,
cutlass::MatrixCoord{0, 0});
typename MatmulQK::Mma::IteratorB iterator_B(
{int32_t(p.q_strideM)},
p.query_ptr + query_start * p.q_strideM,
{p.head_dim, p.num_queries - query_start},
thread_id,
cutlass::MatrixCoord{0, 0});
MatmulQK::Mma::prologue<kReloadK, true>(
shared_storage.mm_qk_k(),
shared_storage.mm_qk_q(),
iterator_A,
iterator_B,
thread_id,
p.head_dim);
}
template <bool skipBoundsChecks>
static CUTLASS_DEVICE void writeFragsToGmem(
SharedStorage& shared_storage,
OutputFragments& output_frags,
Params const& p,
int32_t key_start,
uint8_t warp_id,
uint8_t lane_id) {
uint16_t thread_id = 32 * warp_id + lane_id;
int32_t num_keys_in_block = skipBoundsChecks
? MatmulQK::Mma::Shape::kM
: cutlass::fast_min(
(int32_t)MatmulQK::Mma::Shape::kM, p.num_keys - key_start);
typename MatmulGradV::OutputTileIterator outputV_it(
typename MatmulGradV::OutputTileIterator::Params{p.gV_strideM()},
p.grad_value_ptr + key_start * p.gV_strideM(),
{num_keys_in_block, p.head_dim_value},
thread_id);
accumulateInGmem<MatmulGradV>(
shared_storage.gradV_epilogue_final(),
output_frags.gradV,
outputV_it,
true,
warp_id,
lane_id);
typename MatmulGradK::OutputTileIterator outputK_it(
typename MatmulGradK::OutputTileIterator::Params{p.gK_strideM()},
p.grad_key_ptr + key_start * p.gK_strideM(),
{num_keys_in_block,
false ? MatmulGradK::ThreadblockShape::kN : p.head_dim},
thread_id);
accumulateInGmem<MatmulGradK>(
shared_storage.gradK_epilogue_final(),
output_frags.gradK,
outputK_it,
true,
warp_id,
lane_id);
}
template <typename MatmulT>
static CUTLASS_DEVICE void accumulateInGmem(
typename MatmulT::DefaultEpilogue::SharedStorage& epilogue_smem,
typename MatmulT::Mma::FragmentC const& accum,
typename MatmulT::OutputTileIterator output_it,
bool first,
uint8_t warp_id,
uint8_t lane_id) {
using DefaultEpilogue = typename MatmulT::DefaultEpilogue;
using DefaultOutputOp = typename MatmulT::DefaultOutputOp;
using Mma = typename MatmulT::Mma;
int thread_id = 32 * warp_id + lane_id;
DISPATCH_BOOL(
first, kIsFirst, ([&]() {
static constexpr auto ScaleType = kIsFirst
? cutlass::epilogue::thread::ScaleType::Nothing
: cutlass::epilogue::thread::ScaleType::NoBetaScaling;
using EpilogueOutputOp =
typename cutlass::epilogue::thread::LinearCombination<
typename DefaultOutputOp::ElementOutput,
DefaultOutputOp::kCount,
typename DefaultOutputOp::ElementAccumulator,
typename DefaultOutputOp::ElementCompute,
ScaleType>;
using Epilogue =
typename cutlass::epilogue::threadblock::EpiloguePipelined<
typename DefaultEpilogue::Shape,
typename Mma::Operator,
DefaultEpilogue::kPartitionsK,
typename MatmulT::OutputTileIterator,
typename DefaultEpilogue::AccumulatorFragmentIterator,
typename DefaultEpilogue::WarpTileIterator,
typename DefaultEpilogue::SharedLoadIterator,
EpilogueOutputOp,
typename DefaultEpilogue::Padding,
DefaultEpilogue::kFragmentsPerIteration,
true // IterationsUnroll
>;
EpilogueOutputOp rescale({1, 1});
Epilogue epilogue(epilogue_smem, thread_id, warp_id, lane_id);
epilogue(rescale, output_it, accum, output_it);
}));
}
template <int kElementsPerAccess>
static CUTLASS_DEVICE void computeDelta(
Params const& p,
int32_t query_start,
uint8_t warp_id,
uint8_t lane_id) {
// Each thread computes one value for Delta
// Depending on warp configuration, we might have multiple
// threads of the same warp working on the same row
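    // Concretely, per the loop below: delta[q] = sum over the head dimension of
    // output[q, :] * grad_output[q, :], reduced across the threads sharing a row.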
using AccessType = cutlass::Array<scalar_t, kElementsPerAccess>;
static_assert(kNumThreads >= kBlockSizeI, "");
static constexpr int kNumThreadsPerLine = kNumThreads / kBlockSizeI;
int16_t thread_id = 32 * warp_id + lane_id;
int16_t laneFirstCol = kElementsPerAccess * (lane_id % kNumThreadsPerLine);
int16_t laneRow = thread_id / kNumThreadsPerLine;
bool rowPred = (query_start + laneRow) < p.num_queries;
bool pred = rowPred;
// on windows, previous syntax __restrict__ AccessType*
// resulted in error: "restrict" is not allowed
const AccessType* __restrict__ grad_output_ptr =
reinterpret_cast<const AccessType*>(
p.grad_output_ptr + (query_start + laneRow) * p.gO_strideM +
laneFirstCol);
const AccessType* __restrict__ output_ptr =
reinterpret_cast<const AccessType*>(
p.output_ptr + (query_start + laneRow) * p.o_strideM() +
laneFirstCol);
static constexpr int64_t kMaxIters =
kMaxK / (kElementsPerAccess * kNumThreadsPerLine);
constexpr int kPipelineStages = 2;
accum_t delta_value = accum_t(0);
using GlobalLoad =
cutlass::arch::global_load<AccessType, sizeof(AccessType)>;
AccessType frag_grad_output[kPipelineStages];
AccessType frag_output[kPipelineStages];
auto loadAndIncrement = [&](int ld_pos, bool is_valid) {
frag_grad_output[ld_pos].clear();
frag_output[ld_pos].clear();
GlobalLoad(frag_grad_output[ld_pos], grad_output_ptr, is_valid);
GlobalLoad(frag_output[ld_pos], output_ptr, is_valid);
grad_output_ptr += kNumThreadsPerLine;
output_ptr += kNumThreadsPerLine;
};
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < kPipelineStages - 1; ++iter) {
int ld_pos = iter % kPipelineStages;
pred = pred &&
(laneFirstCol + iter * kElementsPerAccess * kNumThreadsPerLine) <
p.head_dim_value;
loadAndIncrement(ld_pos, pred);
}
auto columnIteration = [&](int iter) {
// Load for next iter
int ld_pos = (iter + kPipelineStages - 1) % kPipelineStages;
pred = pred &&
(laneFirstCol +
(iter + kPipelineStages - 1) * kElementsPerAccess *
kNumThreadsPerLine) < p.head_dim_value;
loadAndIncrement(ld_pos, pred);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < AccessType::kElements; ++i) {
delta_value += accum_t(frag_output[iter % kPipelineStages][i]) *
accum_t(frag_grad_output[iter % kPipelineStages][i]);
}
};
// If we have a small lower-bound for K, we can unroll the loop
if (kMaxK <= 256) {
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < kMaxIters; ++iter) {
columnIteration(iter);
}
} else {
int num_iters =
ceil_div(p.head_dim_value, kElementsPerAccess * kNumThreadsPerLine) *
(kElementsPerAccess * kNumThreadsPerLine);
for (int iter = 0; iter < num_iters; ++iter) {
columnIteration(iter);
}
}
// Reduce between workers
static_assert(
kNumThreadsPerLine == 1 || kNumThreadsPerLine == 2 ||
kNumThreadsPerLine == 4,
"");
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < kNumThreadsPerLine; i *= 2) {
delta_value = delta_value + __shfl_xor_sync(0xffffffff, delta_value, i);
}
// Store in gmem
if (rowPred) {
p.delta_ptr[query_start + laneRow] = delta_value;
}
}
};
template <typename AK>
__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm)
attention_kernel_backward_batched_impl(typename AK::Params p) {
if (!p.advance_to_block()) {
return;
}
AK::attention_kernel(p);
}
template <typename AK>
__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm)
attention_kernel_backward_batched(typename AK::Params params);
| examples/41_fused_multi_head_attention/kernel_backward.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/kernel_backward.h",
"repo_id": "examples",
"token_count": 44873
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Performs a dual gemm in one fused kernel:
```
D0 = epilogue0(X @ B0, C0)
D1 = epilogue1(X @ B1, C1)
D2 = element_wise(D0, D1)
```
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "../kernel/dual_gemm.h"
#include "../dual_gemm_common.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B0 matrix operand
typename LayoutB0_,
/// Layout type for B1 matrix operand
typename LayoutB1_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp0_,
typename EpilogueOutputOp1_,
typename EpilogueOutputOp2_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
bool StoreD0 = true,
bool StoreD1 = true,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator>
class DualGemm {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB0 = LayoutB0_;
using LayoutB1 = LayoutB1_;
using TensorRefB0 = TensorRef<ElementB const, LayoutB0>;
using TensorRefB1 = TensorRef<ElementB const, LayoutB1>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp0 = EpilogueOutputOp0_;
using EpilogueOutputOp1 = EpilogueOutputOp1_;
using EpilogueOutputOp2 = EpilogueOutputOp2_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp1::kCount;
static bool const kSplitKSerial = SplitKSerial;
static bool constexpr kStoreD0 = StoreD0;
static bool constexpr kStoreD1 = StoreD1;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
using LayoutScaleBias = layout::RowMajor;
/// Define the kernel
/// Define the threadblock-scoped matrix multiply-accumulate
static_assert(ArchTag::kMinComputeCapability >= 80, "Only multistage is implemented");
static_assert(kStages >= 3, "Only multistage is implemented");
using Mma0 = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB0, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, ArchTag,
ThreadblockShape, WarpShape,
InstructionShape, Stages, Operator>::ThreadblockMma;
using Mma1 = typename cutlass::gemm::threadblock::DefaultMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB1, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, ArchTag,
ThreadblockShape, WarpShape,
InstructionShape, Stages, Operator>::ThreadblockMma;
using DualMma = threadblock::DualMmaMultistage<
typename Mma0::Shape,
typename Mma0::IteratorA,
typename Mma0::SmemIteratorA,
Mma0::kCacheOpA,
typename Mma0::IteratorB,
typename Mma0::SmemIteratorB,
Mma0::kCacheOpB,
typename Mma1::IteratorB,
typename Mma1::SmemIteratorB,
typename Mma0::ElementC,
typename Mma0::LayoutC,
typename Mma0::Policy,
typename Mma1::Policy,
Mma0::kStages,
SharedMemoryClearOption::kNone
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue0 =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename DualMma::Operator0, kPartitionsK, EpilogueOutputOp0,
EpilogueOutputOp0::kCount>::Epilogue;
using Epilogue1 =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape, typename DualMma::Operator1, kPartitionsK, EpilogueOutputOp1,
EpilogueOutputOp1::kCount>::Epilogue;
/// Define the kernel-level GEMM operator.
using DualGemmKernel = kernel::DualGemm<
DualMma,
Epilogue0, Epilogue1, EpilogueOutputOp2,
ThreadblockSwizzle, kSplitKSerial,
kStoreD0, kStoreD1>;
/// Argument structure
struct Arguments {
//
// Data members
//
DualGemmMode mode;
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A0;
TensorRef<ElementB const, LayoutB0> ref_B0;
TensorRef<ElementC const, LayoutC> ref_C0;
TensorRef<ElementC, LayoutC> ref_D0;
TensorRef<ElementB const, LayoutB1> ref_B1;
TensorRef<ElementC const, LayoutC> ref_C1;
TensorRef<ElementC, LayoutC> ref_D1;
TensorRef<ElementC, LayoutC> ref_D2;
typename EpilogueOutputOp0::Params epilogue0;
typename EpilogueOutputOp1::Params epilogue1;
typename EpilogueOutputOp2::Params epilogue2;
int split_k_slices;
int batch_count;
int64_t batch_stride_A;
int64_t batch_stride_B0;
int64_t batch_stride_B1;
int64_t batch_stride_C;
int64_t batch_stride_D;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments(): problem_size(0, 0, 0), split_k_slices(1) {
}
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
DualGemmMode mode,
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A0_,
TensorRef<ElementB const, LayoutB0> ref_B0_,
TensorRef<ElementC const, LayoutC> ref_C0_,
TensorRef<ElementC, LayoutC> ref_D0_,
TensorRef<ElementB const, LayoutB1> ref_B1_,
TensorRef<ElementC const, LayoutC> ref_C1_,
TensorRef<ElementC, LayoutC> ref_D1_,
TensorRef<ElementC, LayoutC> ref_D2_,
typename EpilogueOutputOp0::Params epilogue0_ =
typename EpilogueOutputOp0::Params(),
typename EpilogueOutputOp1::Params epilogue1_ =
typename EpilogueOutputOp1::Params(),
typename EpilogueOutputOp2::Params epilogue2_ =
typename EpilogueOutputOp2::Params(),
int split_k_slices_ = 1,
int batch_count = 1,
int64_t batch_stride_A = 0,
int64_t batch_stride_B0 = 0,
int64_t batch_stride_B1 = 0,
int64_t batch_stride_C = 0,
int64_t batch_stride_D = 0
):
mode(mode),
problem_size(problem_size_),
ref_A0(ref_A0_),
ref_B0(ref_B0_),
ref_C0(ref_C0_),
ref_D0(ref_D0_),
ref_B1(ref_B1_),
ref_C1(ref_C1_),
ref_D1(ref_D1_),
ref_D2(ref_D2_),
epilogue0(epilogue0_),
epilogue1(epilogue1_),
epilogue2(epilogue2_),
split_k_slices(split_k_slices_),
batch_count(batch_count),
batch_stride_A(batch_stride_A),
batch_stride_B0(batch_stride_B0),
batch_stride_B1(batch_stride_B1),
batch_stride_C(batch_stride_C),
batch_stride_D(batch_stride_D) {
}
};
private:
/// Kernel parameters object
typename DualGemmKernel::Params params_;
public:
/// Constructs the GEMM.
DualGemm() = default;
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
if (args.mode == DualGemmMode::kBatched && kSplitKSerial) {
return Status::kErrorInvalidProblem;
}
if (!kSplitKSerial && args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
if (kStoreD0 != (args.ref_D0.data() != nullptr)) {
return Status::kErrorInternal;
}
if (kStoreD1 != (args.ref_D1.data() != nullptr)) {
return Status::kErrorInternal;
}
Status status = DualGemmKernel::can_implement(
args.problem_size,
args.ref_A0.non_const_ref(),
args.ref_B0.non_const_ref(),
args.ref_C0.non_const_ref(),
args.ref_D0,
args.ref_B1.non_const_ref(),
args.ref_C1.non_const_ref(),
args.ref_D1,
args.ref_D2
);
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
if (kSplitKSerial && args.split_k_slices > 1) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.mode == DualGemmMode::kBatched ? args.batch_count : args.split_k_slices);
if (kSplitKSerial) {
if (args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
}
// Initialize the Params structure
params_ = typename DualGemmKernel::Params{
args.mode,
args.problem_size,
grid_shape,
args.ref_A0.non_const_ref(),
args.ref_B0.non_const_ref(),
args.ref_C0.non_const_ref(),
args.ref_D0,
args.ref_B1.non_const_ref(),
args.ref_C1.non_const_ref(),
args.ref_D1,
args.ref_D2,
args.epilogue0,
args.epilogue1,
args.epilogue2,
reinterpret_cast<int *>(workspace),
args.batch_stride_A,
args.batch_stride_B0,
args.batch_stride_B1,
args.batch_stride_C,
args.batch_stride_D,
};
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
params_.ref_A0.reset(args.ref_A0.non_const_ref().data());
params_.ref_B0.reset(args.ref_B0.non_const_ref().data());
params_.ref_C0.reset(args.ref_C0.non_const_ref().data());
params_.ref_D0.reset(args.ref_D0.data());
params_.ref_B1.reset(args.ref_B1.non_const_ref().data());
params_.ref_C1.reset(args.ref_C1.non_const_ref().data());
params_.ref_D1.reset(args.ref_D1.data());
params_.ref_D2.reset(args.ref_D2.data());
params_.output_op_0 = args.epilogue0;
params_.output_op_1 = args.epilogue1;
params_.output_op_2 = args.epilogue2;
params_.semaphore = reinterpret_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(DualGemmKernel::kThreadCount, 1, 1);
cudaError_t result;
int smem_size = int(sizeof(typename DualGemmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
result = cudaFuncSetAttribute(Kernel<DualGemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
cutlass::Kernel<DualGemmKernel><<<grid, block, smem_size, stream>>>(params_);
result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
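//
// Usage sketch (illustrative only; not part of this header). The element types,
// layouts, tile shapes, and epilogue functors below are assumptions chosen for
// the example -- any combination satisfying the static_asserts above (SM80 or
// newer, at least 3 stages) follows the same flow:
//
//   using DualGemm = cutlass::gemm::device::DualGemm<
//       cutlass::half_t, cutlass::layout::RowMajor,      // ElementA, LayoutA
//       cutlass::half_t, cutlass::layout::ColumnMajor,   // ElementB, LayoutB0
//       cutlass::layout::ColumnMajor,                    // LayoutB1
//       cutlass::half_t, cutlass::layout::RowMajor,      // ElementC, LayoutC
//       float, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 64, 32>,           // threadblock tile
//       cutlass::gemm::GemmShape<64, 32, 32>,            // warp tile
//       cutlass::gemm::GemmShape<16, 8, 16>,             // instruction tile
//       EpilogueOutputOp0, EpilogueOutputOp1, EpilogueOutputOp2>;
//
//   DualGemm::Arguments args(
//       mode, problem_size,                              // mode is a DualGemmMode value
//       ref_A0, ref_B0, ref_C0, ref_D0,
//       ref_B1, ref_C1, ref_D1, ref_D2,
//       epilogue0_params, epilogue1_params, epilogue2_params);
//
//   DualGemm op;
//   if (DualGemm::can_implement(args) == cutlass::Status::kSuccess) {
//     op.initialize(args, /*workspace=*/nullptr);  // workspace only needed for serial split-K
//     op();                                        // equivalent to op.run()
//   }
//
// Note that with the default kStoreD0/kStoreD1 == true, ref_D0 and ref_D1 must
// point to valid storage (can_implement checks this).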
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/45_dual_gemm/device/dual_gemm.h/0 | {
"file_path": "examples/45_dual_gemm/device/dual_gemm.h",
"repo_id": "examples",
"token_count": 6633
} | 7 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Additional permutation information for the example.
*/
#include "cutlass/layout/permute.h"
#include "cutlass/gemm/gemm.h"
namespace example
{
using namespace cute;
// This struct is specialized below for different CUTLASS 2.x permutation ops
// to describe the operation in terms of target CuTe shape and stride order.
template<class Permute>
struct PermuteTraits {};
// Use X as a placeholder for shape division result
using X = Underscore;
// Reshape a rank-2 shape into a multidimensional shape.
// Input:
// shape = (A, B, ...)
// target_shape = ((A1, ..., X, ..., Am), (B1, ..., X, ..., Bn), ...)
// Output:
// ((A1, ..., A/prod(A1..Am), ..., Am), (B1, ..., B/prod(B1..Bn), ..., Bn), ...)
template<class Shape, class TargetShape>
constexpr auto
reshape(Shape const& shape, TargetShape const& target_shape)
{
if constexpr (is_tuple<Shape>::value) {
return cute::transform(shape, target_shape, [](auto && s, auto && t){ return reshape(s, t); });
}
else {
auto idx = find_if(target_shape, [](auto x){ return is_underscore<decltype(x)>{}; });
constexpr int I = decltype(idx)::value;
static_assert(I < tuple_size_v<TargetShape>, "Each mode of TargetShape must contain a placeholder X");
auto divisors = remove<I>(target_shape);
assert(shape % product(divisors) == 0);
return replace<I>(target_shape, shape / product(divisors));
}
}
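// Example (illustrative): with shape (8, 6) and target_shape ((_2, X), (X, _3)),
// reshape returns ((_2, _4), (_2, _3)): each placeholder X is replaced by the
// original mode extent divided by the product of the divisors given for that mode.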
// Given a tensor layout, compute a permutation layout consisting of:
// - sub-modes corresponding to the implied multidimensional shape of the source tensor
// - strides accounting for the permutation operation being performed
template<class Permute, bool Transpose, class Shape, class Stride>
constexpr auto
make_permute_layout(Layout<Shape,Stride> const& layout) {
static_assert(cute::rank(Shape{}) == 3, "Only rank-3 layouts are supported");
if constexpr (Transpose) {
// Deal with tensor B by transposing appropriately before and after computing the permute layout.
// Its CuTe-canonical mode order is [N,K,L], while permute operations expect [row,col,batch].
return select<1,0,2>(make_permute_layout<Permute, false>(select<1,0,2>(layout)));
}
else {
if constexpr (cutlass::layout::is_trivial_permute<Permute>) {
// Special case for NoPermute. Use a depth-2 layout for consistency with other permutations.
using ShapeProfile = tuple<tuple<X>, tuple<X>, tuple<X>>;
return unflatten(layout, ShapeProfile{});
}
else {
// Here's where the permutation layout is actually built
using ShapeProfile = typename PermuteTraits<Permute>::ShapeProfile;
using StrideOrder = typename PermuteTraits<Permute>::StrideOrder;
return make_ordered_layout(reshape(layout.shape(), ShapeProfile{}), StrideOrder{});
}
}
}
namespace detail
{
template<int I>
struct is_constant_pred {
template <class T>
constexpr auto operator()(T) {
return is_constant<I, T>{};
}
};
template<class Permutation, int... I>
constexpr auto
inverse_impl(Permutation const & perm, seq<I...>) {
return cute::make_tuple(Int<find_if(Permutation{}, is_constant_pred<I>{})>{}...);
}
} // namespace detail
// Compute an inverse of a permutation represented as a tuple of cute::Int<>
template<class Permutation>
constexpr auto
inverse(Permutation const & perm) {
auto flat_perm = flatten(perm);
return unflatten(detail::inverse_impl(flat_perm, tuple_seq<decltype(flat_perm)>{}), perm);
}
template<class T>
using inverse_t = decltype(inverse(T{}));
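// Example (illustrative): inverse(cute::make_tuple(_1{}, _2{}, _0{})) yields (_2, _0, _1),
// i.e. position I of the result holds the index at which I appears in the input.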
// Given a rank-2 layout of tensor that is assumed to have been permuted,
// compute the original rank-2 layout of the tensor prior to the permutation.
// This is needed to form the correct input to the standalone permutation kernel.
template<class Permute, bool Transpose, class Shape, class Stride>
constexpr auto
make_original_layout(Layout<Shape,Stride> const& layout) {
static_assert(cute::rank(Shape{}) == 3, "Only rank-3 layouts are supported");
if constexpr (Transpose) {
// Deal with tensor B by transposing appropriately before and after computing the permute layout.
// Its CuTe-canonical mode order is [N,K,L], while permute operations expect [row,col,batch].
return select<1,0,2>(make_original_layout<Permute, false>(select<1,0,2>(layout)));
}
else {
using ShapeProfile = typename PermuteTraits<Permute>::ShapeProfile;
using IndexOrder = typename PermuteTraits<Permute>::IndexOrder;
using OrigOrder = conditional_t<cutlass::gemm::detail::is_major<0,Stride>(), seq<0,1,2>, seq<1,0,2>>;
auto orig_shape = select(flatten(reshape(layout.shape(), ShapeProfile{})), IndexOrder{});
// print("Permuted shape: "); print(reshape(layout.shape(), ShapeProfile{})); print("\n");
// print("Original shape: "); print(orig_shape); print("\n");
return make_ordered_layout(product_each(orig_shape), OrigOrder{});
}
}
/////////////// Tensor4DPermute0213 ////////////////////
template<int D1, int D2>
struct PermuteTraits<cutlass::layout::Tensor4DPermute0213ColumnMajor<D1, D2>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<X,Int<D1>>, Shape<Int<D2>,X>, Shape<X>>;
using IndexOrder = Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>;
using StrideOrder = inverse_t<IndexOrder>; // Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>;
};
template<int D1, int D2>
struct PermuteTraits<cutlass::layout::Tensor4DPermute0213ColumnMajorInverse<D1, D2>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<X,Int<D2>>, Shape<Int<D1>,X>, Shape<X>>;
using IndexOrder = Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>;
using StrideOrder = inverse_t<IndexOrder>; // Step<Step<_0,_2>, Step<_1,_3>, Step<_4>>;
};
template<int D1, int D2>
struct PermuteTraits<cutlass::layout::Tensor4DPermute0213RowMajor<D1, D2>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<Int<D1>,X>, Shape<X,Int<D2>>, Shape<X>>;
using IndexOrder = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>;
using StrideOrder = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>;
};
template<int D1, int D2>
struct PermuteTraits<cutlass::layout::Tensor4DPermute0213RowMajorInverse<D1, D2>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<Int<D2>,X>, Shape<X,Int<D1>>, Shape<X>>;
using IndexOrder = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>;
using StrideOrder = Step<Step<_1,_3>, Step<_0,_2>, Step<_4>>;
};
/////////////// Tensor4DPermuteBMM0321 ////////////////////
template<int D>
struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D>>
{
static constexpr bool kBatched = true;
using ShapeProfile = Shape<Shape<X>, Shape<X>, Shape<Int<D>,X>>;
using IndexOrder = Step<Step<_0,_2>, Step<_1>, Step<_3>>;
using StrideOrder = Step<Step<_0>, Step<_2>, Step<_1,_3>>;
};
template<int D>
struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0321ColumnMajorInverse<D>>
{
static constexpr bool kBatched = true;
using ShapeProfile = Shape<Shape<X,Int<D>>, Shape<X>, Shape<X>>;
using IndexOrder = Step<Step<_0>, Step<_2>, Step<_1,_3>>;
using StrideOrder = Step<Step<_0,_2>, Step<_1>, Step<_3>>;
};
/////////////// Tensor4DPermuteBMM0213 ////////////////////
template<int D>
struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D>>
{
static constexpr bool kBatched = true;
using ShapeProfile = Shape<Shape<X>, Shape<X>, Shape<Int<D>,X>>;
using IndexOrder = Step<Step<_0>, Step<_1,_2>, Step<_3>>;
using StrideOrder = Step<Step<_2>, Step<_0>, Step<_1,_3>>;
};
template<int D>
struct PermuteTraits<cutlass::layout::Tensor4DPermuteBMM0213RowMajorInverse<D>>
{
static constexpr bool kBatched = true;
using ShapeProfile = Shape<Shape<X>, Shape<X,Int<D>>, Shape<X>>;
using IndexOrder = Step<Step<_0>, Step<_1>, Step<_2,_3>>;
using StrideOrder = Step<Step<_1>, Step<_0,_2>, Step<_3>>;
};
/////////////// Tensor5DPermute02413 ////////////////////
template<int D1, int D2, int D3>
struct PermuteTraits<cutlass::layout::Tensor5DPermute02413ColumnMajor<D1, D2, D3>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<X,Int<D1>>, Shape<Int<D2>,Int<D3>,X>, Shape<X>>;
using IndexOrder = Step<Step<_0,_2>, Step<_4,_1,_3>, Step<_5>>;
using StrideOrder = inverse_t<IndexOrder>; // Step<Step<_0,_3>, Step<_1,_4,_2>, Step<_5>>;
};
template<int D1, int D2, int D3>
struct PermuteTraits<cutlass::layout::Tensor5DPermute02413ColumnMajorInverse<D1, D2, D3>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<X,Int<D2>>, Shape<X,Int<D1>,Int<D3>>, Shape<X>>;
using IndexOrder = Step<Step<_0,_3>, Step<_1,_4,_2>, Step<_5>>;
using StrideOrder = inverse_t<IndexOrder>; // Step<Step<_0,_2>, Step<_4,_1,_3>, Step<_5>>;
};
/////////////// Tensor5DPermute20314 ////////////////////
template<int D1, int D2, int D3>
struct PermuteTraits<cutlass::layout::Tensor5DPermute20314RowMajor<D1, D2, D3>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<Int<D1>,X>, Shape<X,Int<D3>,Int<D2>>, Shape<X>>;
using IndexOrder = Step<Step<_2,_0>, Step<_3,_1,_4>, Step<_5>>;
using StrideOrder = Step<Step<_1,_3>, Step<_0,_2,_4>, Step<_5>>;
};
template<int D1, int D2, int D3>
struct PermuteTraits<cutlass::layout::Tensor5DPermute20314RowMajorInverse<D1, D2, D3>>
{
static constexpr bool kBatched = false;
using ShapeProfile = Shape<Shape<X,Int<D2>>, Shape<X,Int<D1>,Int<D3>>, Shape<X>>;
using IndexOrder = Step<Step<_3,_0>, Step<_2,_4,_1>, Step<_5>>;
using StrideOrder = Step<Step<_4,_2>, Step<_0,_3,_1>, Step<_5>>;
};
} // namespace example
| examples/53_hopper_gemm_permute/permute_traits.hpp/0 | {
"file_path": "examples/53_hopper_gemm_permute/permute_traits.hpp",
"repo_id": "examples",
"token_count": 4116
} | 8 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cute/tensor.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/atom/copy_atom.hpp"
#include <random>
#include "cutlass/util/print_error.hpp"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/collective/collective_mma.hpp"
using namespace cute;
struct AmpereUnpredicatedFprop {
//
// Static config for conv problem shape
//
using D = _6;
using H = _4;
using W = _4;
using T = _3;
using R = _3;
using S = _3;
using Z = _4;
using P = _2;
using Q = _2;
using C = _64;
using K = _128;
// Tiler config
using Tiler_K = decltype(cute::min(K{}, _128{}));
using Tiler_C = decltype(cute::min(C{}, _32{}));
using Tiler_N = _4;
using TileM = Tiler_K;
using TileN = Shape<Tiler_N, Z, P, Q>;
using TileK = Shape<Tiler_C,_1,_1,_1>;
using PIPE = _3;
using TilerFlt = Shape<TileM, TileK>;
using TilerAct = Shape<TileN, TileK>;
using TilerOut = Shape<TileM, TileN>;
using TileSizeM = Int<size(TileM{})>;
using TileSizeN = Int<size(TileN{})>;
using TileSizeK = Int<size(TileK{})>;
static constexpr int Stages = PIPE::value;
using ElementFlt = tfloat32_t;
using ElementAct = tfloat32_t;
using ElementOut = float;
using TiledMma = TiledMMA<
MMA_Atom<SM80_16x8x8_F32TF32TF32F32_TN>,
Layout<Shape<_2,_2,_1>>,
Tile<_32,_32,Underscore>>;
static constexpr int MaxThreadsPerBlock = size(TiledMma{});
static constexpr int MinBlocksPerMultiprocessor = 1;
union SharedStorage {
struct {
ElementFlt sAMatrix[size(TileM{}) * size(TileK{}) * size(PIPE{})];
ElementAct sBMatrix[size(TileN{}) * size(TileK{}) * size(PIPE{})];
} mainloop;
struct {
ElementOut sCMatrix[size(TileM{}) * size(TileN{})];
} epilogue;
};
//
// Stencil tensor
//
using GmemLayoutFlt = decltype(make_ordered_layout(
Shape< K, Shape< C, T, R, S>>{},
tuple<_4, tuple<_0,_3,_2,_1>>{}));
// We have 64 elements * 32b each in the major mode that we can vectorize
// Max vector size is 128b, so lay 16 threads along the major mode with a vector size of 4
// Rest along the minor mode
using GmemTiledCopyFlt = decltype(make_tiled_copy(
Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, ElementFlt>{},
Layout<Shape <_16, _8>,
Stride< _8, _1>>{},
Layout<Shape < _1, _4>>{}));
// Following layout is also correct, but trades off dynamic strides in the slice for bank conflict free accesses
// using SmemLayoutFlt = decltype(
// composition(Swizzle<3,2,3>{},
// make_ordered_layout(
// Shape<TileSizeM,TileSizeK,PIPE>{},
// tuple< _1, _0, _2>{})));
using SmemLayoutAtomFlt = decltype(
composition(Swizzle<1,2,3>{},
Layout<Shape <_8,Shape <_4, _2>>,
Stride<_4,Stride<_1,_32>>>{}));
using SmemCopyAtomFlt = Copy_Atom<SM75_U32x4_LDSM_N, ElementFlt>;
//
// Activation tensor
//
// Activation tensor is major in the contraction mode, so vectorize that mode first
// Then lay out the rest of the threads along the other mode
using GmemTiledCopyAct = decltype(make_tiled_copy(
Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<uint128_t>, ElementAct>{},
Layout<Shape <_16, _8>,
Stride< _8, _1>>{},
Layout<Shape < _1, _4>>{}));
// Following layout is also correct, but trades off dynamic strides in the slice for bank conflict free accesses
// using SmemLayoutAct = decltype(
// composition(Swizzle<3,2,3>{},
// make_ordered_layout(
// Shape<TileSizeN,TileSizeK,PIPE>{},
// tuple< _1, _0, _2>{})));
using SmemLayoutAtomAct = decltype(
composition(Swizzle<1,2,3>{},
Layout<Shape <_8,Shape <_4, _2>>,
Stride<_4,Stride<_1,_32>>>{}));
using SmemCopyAtomAct = Copy_Atom<SM75_U32x4_LDSM_N, ElementAct>;
//
// Output tensor
//
using GmemTiledCopyOut = decltype(make_tiled_copy(
Copy_Atom<UniversalCopy<uint128_t>, ElementAct>{},
Layout<Shape <_8, _16>,
Stride<_1, _8>>{},
Layout<Shape <_4, _1>>{}));
using SmemCopyAtomOut = Copy_Atom<UniversalCopy<uint32_t>, ElementOut>;
  // This can be optimized to make accesses bank-conflict-free (BCF), but we use a col-major layout here to show off composability
using SmemLayoutOut = Layout<Shape<TileSizeM, TileSizeN>>;
//
// Conv functor
//
template <class EngineFlt, class TensorActivation, class TensorOutput>
void __device__
operator()(cute::Tensor<EngineFlt, GmemLayoutFlt> mFlt, // ( K, (C,T,R,S))
TensorActivation mAct, // ((N,Z,P,Q), (C,T,R,S))
TensorOutput mOut, // ( K, (N,Z,P,Q))
char* smem_buf) const {
using namespace cute;
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveMma<
cutlass::gemm::MainloopSm80CpAsyncUnpredicated<PIPE::value>,
Shape<TileM,TileN,TileK>,
ElementFlt,
Underscore, // Ignore the stride, we are passing full cute::Tensor to operator()
ElementAct,
Underscore, // Ignore the stride, we are passing full cute::Tensor to operator()
TiledMma,
GmemTiledCopyFlt,
SmemLayoutAtomFlt,
SmemCopyAtomFlt,
cute::identity,
GmemTiledCopyAct,
SmemLayoutAtomAct,
SmemCopyAtomAct,
cute::identity>;
TiledMma tiled_mma;
Tensor accum = partition_fragment_C(tiled_mma, TilerOut{});
clear(accum);
// Set up tensors
// NOTE: blockIdx.x projects onto act-NDHW mode, y along the flt-K mode for the sake of higher dynamic range in NDHW
Tensor gA_mk = local_tile(mFlt, TilerFlt{}, make_coord(_,_)); // (BLK_M,BLK_K,m',k')
Tensor gB_nk = local_tile(mAct, TilerAct{}, make_coord(_,_)); // (BLK_N,BLK_K,n',_1)
Tensor gC_mn = local_tile(mOut, TilerOut{}, make_coord(_,_)); // (BLK_M,BLK_N,m',n')
// Compute m_coord and n_coord with their post-tiled shapes
auto m_coord = idx2crd(int(blockIdx.y), shape<2>(gA_mk));
auto n_coord = idx2crd(int(blockIdx.x), shape<2>(gB_nk));
Tensor gA = gA_mk(_,_,m_coord,_); // (BLK_M,BLK_K,k')
Tensor gB = gB_nk(_,_,n_coord,_); // (BLK_N,BLK_K,_1)
Tensor gC = gC_mn(_,_,m_coord,n_coord); // (BLK_M,BLK_N)
auto k_tile_iter = cute::make_coord_iterator(size<2>(gA));
int k_tile_count = size<2>(gA);
CollectiveMainloop collective_mma;
collective_mma(
accum,
gA,
gB,
accum,
k_tile_iter, k_tile_count,
Underscore{}, // no residue since we do not support predication
threadIdx.x,
smem_buf);
//
// Epilogue
//
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf);
Tensor sC = make_tensor(make_smem_ptr(&storage.epilogue.sCMatrix[0]), SmemLayoutOut{});
auto smem_tiled_copy_C = make_tiled_copy_C(SmemCopyAtomOut{}, tiled_mma);
auto smem_thr_copy_C = smem_tiled_copy_C.get_slice(threadIdx.x);
auto tCrC = smem_thr_copy_C.retile_S(accum);
auto tCsC = smem_thr_copy_C.partition_D(sC);
copy(smem_tiled_copy_C, tCrC, tCsC);
__syncthreads();
GmemTiledCopyOut gmem_tiled_copy_C;
auto gmem_thr_copy_C = gmem_tiled_copy_C.get_slice(threadIdx.x);
auto tDsC = gmem_thr_copy_C.partition_S(sC);
auto tDgC = gmem_thr_copy_C.partition_D(gC);
copy(gmem_tiled_copy_C, tDsC, tDgC);
#if 0
if (thread0()) {
print("mAct = "); print(mAct); print('\n');
print("mFlt = "); print(mFlt); print('\n');
print("mOut = "); print(mOut); print('\n');
print("gA = "); print(gA); print('\n');
print("gB = "); print(gB); print('\n');
print("gC = "); print(gC); print('\n');
print("sA = "); print(sA.layout()); print('\n');
print("sB = "); print(sB.layout()); print('\n');
print("sC = "); print(sC.layout()); print('\n');
print("tAgA = "); print(tAgA.layout()); print('\n');
print("tBgB = "); print(tBgB.layout()); print('\n');
print("tAsA = "); print(tAsA.layout()); print('\n');
print("tBsB = "); print(tBsB.layout()); print('\n');
print("tCsA = "); print(tCsA.layout()); print('\n');
print("tCsB = "); print(tCsB.layout()); print('\n');
print("tCrC = "); print(tCrC.layout()); print('\n');
print("tCsC = "); print(tCsC.layout()); print('\n');
print("tDsC = "); print(tDsC.layout()); print('\n');
print("tDgC = "); print(tDgC.layout()); print('\n');
print("gmem tiled copy A = "); print(gmem_tiled_copy_A); print('\n');
print("gmem tiled copy B = "); print(gmem_tiled_copy_B); print('\n');
print("gmem tiled copy C = "); print(gmem_tiled_copy_C); print('\n');
print("k_tile_count = "); print(size<2>(gA)); print('\n');
print("k_tile_iter = "); print(*k_tile_iter); print('\n');
print("K_BLOCK_MAX = "); print(K_BLOCK_MAX); print('\n');
}
#endif
}
};
template <class TensorFlt, class TensorAct, class TensorOut>
inline int
fprop_reference(
TensorFlt mStencil, // Logical MK: ( K, (C,T,R,S))
TensorAct mActivation, // Logical NK: ((N,Z,P,Q), (C,T,R,S))
TensorOut mOutput, // Logical MN: ( K, (N,Z,P,Q))
TensorOut mOutputRef) {
int32_t N = size<1,0>(mOutputRef);
int32_t Z = size<1,1>(mOutputRef);
int32_t P = size<1,2>(mOutputRef);
int32_t Q = size<1,3>(mOutputRef);
int32_t T = size<1,3>(mStencil);
int32_t R = size<1,2>(mStencil);
int32_t S = size<1,1>(mStencil);
int32_t C = size<1,0>(mStencil);
size_t K = static_cast<size_t>(size<0>(mOutputRef));
size_t NZPQ = static_cast<size_t>(size<1>(mOutputRef));
size_t CTRS = static_cast<size_t>(size<1>(mStencil));
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (size_t logical_m = 0; logical_m < K; ++logical_m) {
for (size_t logical_n = 0; logical_n < NZPQ; ++logical_n) {
auto accumulator = float(0);
for (size_t logical_k = 0; logical_k < CTRS; ++logical_k) {
accumulator += mStencil(logical_m, logical_k) * mActivation(logical_n, logical_k);
}
mOutputRef(logical_m, logical_n) = accumulator;
}
}
return print_relative_error(mOutput, mOutputRef, /*print_verbose*/ false, /*print_error*/ true, /*error_margin*/ 0.01);
}
| examples/59_ampere_gather_scatter_conv/ampere_conv_kernel.h/0 | {
"file_path": "examples/59_ampere_gather_scatter_conv/ampere_conv_kernel.h",
"repo_id": "examples",
"token_count": 5630
} | 9 |
<jupyter_start><jupyter_text>Example of using elementwise activation functions in the CUTLASS Python interfaceThis notebook walks through a basic example of using the CUTLASS Python interface to declare, compile, and run GEMMs with different epilogues.[](https://colab.research.google.com/github/NVIDIA/cutlass/blob/main/examples/python/01_epilogue.ipynb) Prerequisites for running on ColabThis notebook requires an NVIDIA GPU. If `nvidia-smi` fails, go to Runtime -> Change runtime type -> Hardware accelerator and confirm a GPU is selected.<jupyter_code>!#nvidia-smi<jupyter_output><empty_output><jupyter_text>If running on Colab, you will need to install the CUTLASS Python interface. To do so, uncomment the following line and run the cell:<jupyter_code>!#pip install nvidia-cutlass<jupyter_output><empty_output><jupyter_text>General setupWe first import various packages needed for the example and construct the input and output tensors that will be used in our example.<jupyter_code>import numpy as np
import cutlass
# This controls whether the C++ GEMM declaration will be printed at each step. Set to `False` to
# omit this information.
print_module = True
m = 256
n = m
k = m
type_A = np.float16
type_B = np.float16
type_C = np.float16
type_D = np.float16
np.random.seed(1234)
scope_min = -4
scope_max = 4
tensor_A = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, k)).astype(type_A))
tensor_B = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(k, n)).astype(type_B))
tensor_C = np.ceil(np.random.uniform(low=scope_min, high=scope_max, size=(m, n)).astype(type_C))
alpha = np.float16(1.)
beta = np.float16(0.)
tensor_D = np.zeros(tensor_C.shape).astype(type_D)<jupyter_output><empty_output><jupyter_text>Run a GEMM with an identity activation functionTo begin, we simply run a default GEMM with an identity activation function. This performs the well-known operation `D = alpha * (A @ B) + beta * C`. This is the default activation function used, and does not need to be specified.<jupyter_code>plan = cutlass.op.Gemm(element=np.float16, layout=cutlass.LayoutType.RowMajor)
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output><empty_output><jupyter_text>Run a GEMM with a ReLU element-wise activation functionCUTLASS makes it easy to support other element-wise activation functions. This results in performing an element-wise after the generic linear combination performed in a GEMM. If we call such an activation function `act`, the resulting formulation is:```D = alpha * (A @ B) + beta * CD = act(D)```Here, we will add a ReLU activation function. Given an input `x`, ReLU returns `max(x, 0)`.This is easy to do in CUTLASS. One only needs to set the plan's `activation` field.<jupyter_code>tensor_D_relu = np.zeros(tensor_C.shape).astype(type_D)
plan.activation = "relu"
plan.run(tensor_A, tensor_B, tensor_C, tensor_D_relu, print_module=print_module)<jupyter_output><empty_output><jupyter_text>We can now verify the result of the GEMM that used a ReLU activation function:<jupyter_code>relu_ref = (tensor_D >= 0).astype(type_D) * tensor_D
np.testing.assert_array_equal(relu_ref, tensor_D_relu)<jupyter_output><empty_output><jupyter_text>Other element-wise activation functionsCUTLASS supports a variety of widely-used element-wise activation functions. We can obtain a list of these functions via the `activations()` method.<jupyter_code>activations = plan.activations()
for activation in activations:
print(activation)<jupyter_output><empty_output><jupyter_text>We can then run each of them:<jupyter_code>for activation in activations:
print('=============================================================================================')
print(f'Compiling and running activation {activation}')
print('=============================================================================================')
plan.activation = activation
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output><empty_output><jupyter_text>To add an activation that takes a parameter, such as `leaky_relu`, provide a tuple containing the activation function name and the parameter (or a list of parameters).<jupyter_code>negative_slope = 0.5
plan.activation = ("leaky_relu", negative_slope)
plan.run(tensor_A, tensor_B, tensor_C, tensor_D, print_module=print_module)<jupyter_output><empty_output> | examples/python/01_epilogue.ipynb/0 | {
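<jupyter_text>As a sanity check, we can compare this result against a NumPy reference. The sketch below assumes the usual definition `leaky_relu(x) = x if x >= 0 else negative_slope * x` and assumes the identity activation can be selected by the string `"identity"` (if the name differs in your version of the Python interface, `plan.activations()` lists the valid options). It recomputes the plain linear combination on the device first, then allows a small tolerance, since the device applies the activation before the final conversion to `float16`.<jupyter_code># Recompute the un-activated GEMM result (assumes "identity" is a valid activation name)
tensor_D_no_act = np.zeros(tensor_C.shape).astype(type_D)
tensor_D_leaky = np.zeros(tensor_C.shape).astype(type_D)
plan.activation = "identity"
plan.run(tensor_A, tensor_B, tensor_C, tensor_D_no_act, print_module=False)
# Run leaky_relu again into a fresh output buffer
plan.activation = ("leaky_relu", negative_slope)
plan.run(tensor_A, tensor_B, tensor_C, tensor_D_leaky, print_module=False)
# NumPy reference applied to the un-activated device result
leaky_relu_ref = np.where(tensor_D_no_act >= 0, tensor_D_no_act, negative_slope * tensor_D_no_act).astype(type_D)
np.testing.assert_allclose(leaky_relu_ref, tensor_D_leaky, rtol=1e-2, atol=1e-2)<jupyter_output><empty_output>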
"file_path": "examples/python/01_epilogue.ipynb",
"repo_id": "examples",
"token_count": 1412
} | 10 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/util/type_traits.hpp>
#include <cute/container/tuple.hpp>
#include <cute/algorithm/functional.hpp>
#include <cute/numeric/integer_sequence.hpp>
#include <cute/numeric/integral_constant.hpp>
/// @file tuple_algorithms.hpp
/// @brief Common algorithms on (hierarchical) tuples
///
/// Code guidelines and style preferences:
///
/// For perfect forwarding, don't use std::forward, because it may not
/// be defined in device code when compiling with NVRTC. Instead, use
/// `static_cast<ParameterType&&>(parameter_name)`.
///
/// CuTe generally does not bother forwarding functions, as
/// reference-qualified member functions are rare in this code base.
///
/// Throughout CUTLASS, cute::make_tuple always needs to be called
/// namespace-qualified, EVEN If inside the cute namespace and/or in
/// scope of a "using namespace cute" declaration. Otherwise, the
/// compiler may select std::make_tuple instead of cute::make_tuple,
/// due to argument-dependent lookup. Two problems may result from
/// that.
///
/// 1. Functions have an unexpected return type (std::tuple instead of
/// cute::tuple), so functions that take cute::tuple parameters
/// fail to compile (generally inside functions that have template
/// parameters expected to be cute::tuple).
///
/// 2. std::tuple does not have the required __host__ __device__
/// markings, so the CUDA compiler complains if you use it in
/// device code.
///
/// cute::make_tuple will occur more often than std::make_tuple would
/// in modern C++ code, because cute::tuple's design deprioritizes
/// correct operation of CTAD (constructor template argument
/// deduction) in favor of implementation simplicity.
namespace cute
{
//
// Apply (Unpack)
// (t, f) => f(t_0,t_1,...,t_n)
//
namespace detail {
template <class T, class F, int... I>
CUTE_HOST_DEVICE constexpr
auto
apply(T&& t, F&& f, seq<I...>)
{
return f(get<I>(static_cast<T&&>(t))...);
}
} // end namespace detail
template <class T, class F>
CUTE_HOST_DEVICE constexpr
auto
apply(T&& t, F&& f)
{
return detail::apply(static_cast<T&&>(t), f, tuple_seq<T>{});
}
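// Example (illustrative):
//   cute::apply(cute::make_tuple(1, 2, 3),
//               [](auto... v) { return (v + ... + 0); })   // yields 1 + 2 + 3 = 6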
//
// Transform Apply
// (t, f, g) => g(f(t_0),f(t_1),...)
//
namespace detail {
template <class T, class F, class G, int... I>
CUTE_HOST_DEVICE constexpr
auto
tapply(T&& t, F&& f, G&& g, seq<I...>)
{
return g(f(get<I>(static_cast<T&&>(t)))...);
}
template <class T0, class T1, class F, class G, int... I>
CUTE_HOST_DEVICE constexpr
auto
tapply(T0&& t0, T1&& t1, F&& f, G&& g, seq<I...>)
{
return g(f(get<I>(static_cast<T0&&>(t0)),
get<I>(static_cast<T1&&>(t1)))...);
}
template <class T0, class T1, class T2, class F, class G, int... I>
CUTE_HOST_DEVICE constexpr
auto
tapply(T0&& t0, T1&& t1, T2&& t2, F&& f, G&& g, seq<I...>)
{
return g(f(get<I>(static_cast<T0&&>(t0)),
get<I>(static_cast<T1&&>(t1)),
get<I>(static_cast<T2&&>(t2)))...);
}
} // end namespace detail
template <class T, class F, class G>
CUTE_HOST_DEVICE constexpr
auto
transform_apply(T&& t, F&& f, G&& g)
{
if constexpr (is_tuple<remove_cvref_t<T>>::value) {
return detail::tapply(static_cast<T&&>(t), f, g, tuple_seq<T>{});
} else {
return g(f(static_cast<T&&>(t)));
}
}
template <class T0, class T1, class F, class G>
CUTE_HOST_DEVICE constexpr
auto
transform_apply(T0&& t0, T1&& t1, F&& f, G&& g)
{
if constexpr (is_tuple<remove_cvref_t<T0>>::value) {
return detail::tapply(static_cast<T0&&>(t0), static_cast<T1&&>(t1), f, g, tuple_seq<T0>{});
} else {
return g(f(static_cast<T0&&>(t0), static_cast<T1&&>(t1)));
}
}
template <class T0, class T1, class T2, class F, class G>
CUTE_HOST_DEVICE constexpr
auto
transform_apply(T0&& t0, T1&& t1, T2&& t2, F&& f, G&& g)
{
if constexpr (is_tuple<remove_cvref_t<T0>>::value) {
return detail::tapply(static_cast<T0&&>(t0), static_cast<T1&&>(t1), static_cast<T2&&>(t2), f, g, tuple_seq<T0>{});
} else {
return g(f(static_cast<T0&&>(t0), static_cast<T1&&>(t1), static_cast<T2&&>(t2)));
}
}
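// Illustrative usage (a sketch, Int<> as above): f is applied per element and
// g consumes the transformed pack:
//   auto r = cute::transform_apply(cute::make_tuple(Int<1>{}, Int<2>{}, Int<3>{}),
//                                  [](auto a) { return a * Int<2>{}; },
//                                  [](auto... a) { return cute::make_tuple(a...); });
//   // r == (Int<2>{}, Int<4>{}, Int<6>{})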
//
// For Each
// (t, f) => f(t_0),f(t_1),...,f(t_n)
//
template <class T, class F>
CUTE_HOST_DEVICE constexpr
void
for_each(T&& t, F&& f)
{
if constexpr (is_tuple<remove_cvref_t<T>>::value) {
return detail::apply(t, [&](auto&&... a) { (f(static_cast<decltype(a)&&>(a)), ...); }, tuple_seq<T>{});
} else {
return f(static_cast<T&&>(t));
}
CUTE_GCC_UNREACHABLE;
}
template <class T, class F>
CUTE_HOST_DEVICE constexpr
auto
for_each_leaf(T&& t, F&& f)
{
if constexpr (is_tuple<remove_cvref_t<T>>::value) {
return detail::apply(static_cast<T&&>(t), [&](auto&&... a){ return (for_each_leaf(static_cast<decltype(a)&&>(a), f), ...); }, tuple_seq<T>{});
} else {
return f(static_cast<T&&>(t));
}
CUTE_GCC_UNREACHABLE;
}
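// Illustrative usage (a sketch): for_each visits each top-level element for its
// side effects; for_each_leaf recurses into nested tuples before applying f.
//   int sum = 0;
//   cute::for_each(cute::make_tuple(1, 2, 3), [&](int a) { sum += a; });     // sum == 6
//   cute::for_each_leaf(cute::make_tuple(1, cute::make_tuple(2, 3)),
//                       [&](int a) { sum += a; });                           // sum == 12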
//
// Transform
// (t, f) => (f(t_0),f(t_1),...,f(t_n))
//
template <class T, class F>
CUTE_HOST_DEVICE constexpr
auto
transform(T const& t, F&& f)
{
if constexpr (is_tuple<T>::value) {
return detail::tapply(t, f, [](auto const&... a){ return cute::make_tuple(a...); }, tuple_seq<T>{});
} else {
return f(t);
}
CUTE_GCC_UNREACHABLE;
}
template <class T0, class T1, class F>
CUTE_HOST_DEVICE constexpr
auto
transform(T0 const& t0, T1 const& t1, F&& f)
{
if constexpr (is_tuple<T0>::value) {
static_assert(tuple_size<T0>::value == tuple_size<T1>::value, "Mismatched tuple_size");
return detail::tapply(t0, t1, f, [](auto const&... a){ return cute::make_tuple(a...); }, tuple_seq<T0>{});
} else {
return f(t0, t1);
}
CUTE_GCC_UNREACHABLE;
}
template <class T0, class T1, class T2, class F>
CUTE_HOST_DEVICE constexpr
auto
transform(T0 const& t0, T1 const& t1, T2 const& t2, F&& f)
{
if constexpr (is_tuple<T0>::value) {
static_assert(tuple_size<T0>::value == tuple_size<T1>::value, "Mismatched tuple_size");
static_assert(tuple_size<T0>::value == tuple_size<T2>::value, "Mismatched tuple_size");
return detail::tapply(t0, t1, t2, f, [](auto const&... a){ return cute::make_tuple(a...); }, tuple_seq<T0>{});
} else {
return f(t0, t1, t2);
}
CUTE_GCC_UNREACHABLE;
}
template <class T, class F>
CUTE_HOST_DEVICE constexpr
auto
transform_leaf(T const& t, F&& f)
{
if constexpr (is_tuple<T>::value) {
return transform(t, [&](auto const& a) { return transform_leaf(a, f); });
} else {
return f(t);
}
CUTE_GCC_UNREACHABLE;
}
template <class T0, class T1, class F>
CUTE_HOST_DEVICE constexpr
auto
transform_leaf(T0 const& t0, T1 const& t1, F&& f)
{
if constexpr (is_tuple<T0>::value) {
return transform(t0, t1, [&](auto const& a, auto const& b) { return transform_leaf(a, b, f); });
} else {
return f(t0, t1);
}
CUTE_GCC_UNREACHABLE;
}
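// Illustrative usage (a sketch, Int<> as above):
//   auto a = cute::make_tuple(Int<1>{}, Int<2>{});
//   auto b = cute::make_tuple(Int<3>{}, Int<4>{});
//   cute::transform(a, b, [](auto x, auto y) { return x + y; });   // (Int<4>{}, Int<6>{})
//   // transform_leaf applies f at the leaves and preserves the nesting:
//   cute::transform_leaf(cute::make_tuple(Int<1>{}, cute::make_tuple(Int<2>{}, Int<3>{})),
//                        [](auto x) { return x * Int<10>{}; });    // (Int<10>{}, (Int<20>{}, Int<30>{}))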
//
// find and find_if
//
namespace detail {
template <class T, class F, int I, int... Is>
CUTE_HOST_DEVICE constexpr
auto
find_if(T const& t, F&& f, seq<I,Is...>)
{
if constexpr (decltype(f(get<I>(t)))::value) {
return cute::C<I>{};
} else
if constexpr (sizeof...(Is) == 0) {
return cute::C<I+1>{};
} else {
return find_if(t, f, seq<Is...>{});
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
template <class T, class F>
CUTE_HOST_DEVICE constexpr
auto
find_if(T const& t, F&& f)
{
if constexpr (is_tuple<T>::value) {
return detail::find_if(t, f, tuple_seq<T>{});
} else {
return cute::C<decltype(f(t))::value ? 0 : 1>{};
}
CUTE_GCC_UNREACHABLE;
}
template <class T, class X>
CUTE_HOST_DEVICE constexpr
auto
find(T const& t, X const& x)
{
return find_if(t, [&](auto const& v) { return v == x; }); // This should always return a static true/false
}
template <class T, class F>
CUTE_HOST_DEVICE constexpr
auto
any_of(T const& t, F&& f)
{
if constexpr (is_tuple<T>::value) {
return detail::apply(cute::transform(t, f), [&] (auto const&... a) { return (false_type{} || ... || a); }, tuple_seq<T>{});
} else {
return f(t);
}
CUTE_GCC_UNREACHABLE;
}
template <class T, class F>
CUTE_HOST_DEVICE constexpr
auto
all_of(T const& t, F&& f)
{
if constexpr (is_tuple<T>::value) {
return detail::apply(t, [&] (auto const&... a) { return (true_type{} && ... && f(a)); }, tuple_seq<T>{});
} else {
return f(t);
}
CUTE_GCC_UNREACHABLE;
}
template <class T, class F>
CUTE_HOST_DEVICE constexpr
auto
none_of(T const& t, F&& f)
{
return not any_of(t, f);
}
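// Illustrative usage (a sketch, Int<> as above); the comparisons/predicates are
// expected to yield static truth values:
//   auto t = cute::make_tuple(Int<4>{}, Int<8>{}, Int<2>{});
//   cute::find(t, Int<8>{});                                  // Int<1>{}
//   cute::find(t, Int<9>{});                                  // Int<3>{} (the tuple size when not found)
//   cute::any_of(t, [](auto x) { return x == Int<8>{}; });    // static true
//   cute::all_of(t, [](auto x) { return x >  Int<1>{}; });    // static true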
//
// Filter
// (t, f) => <f(t_0),f(t_1),...,f(t_n)>
//
template <class T, class F>
CUTE_HOST_DEVICE constexpr
auto
filter_tuple(T const& t, F&& f)
{
return transform_apply(t, f, [](auto const&... a) { return cute::tuple_cat(a...); });
}
template <class T0, class T1, class F>
CUTE_HOST_DEVICE constexpr
auto
filter_tuple(T0 const& t0, T1 const& t1, F&& f)
{
return transform_apply(t0, t1, f, [](auto const&... a) { return cute::tuple_cat(a...); });
}
template <class T0, class T1, class T2, class F>
CUTE_HOST_DEVICE constexpr
auto
filter_tuple(T0 const& t0, T1 const& t1, T2 const& t2, F&& f)
{
return transform_apply(t0, t1, t2, f, [](auto const&... a) { return cute::tuple_cat(a...); });
}
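// Illustrative usage (a sketch, Int<> as above): f returns a sub-tuple per
// element (possibly empty) and the results are tuple_cat'd, so elements can be
// dropped or expanded:
//   auto r = cute::filter_tuple(cute::make_tuple(Int<1>{}, Int<2>{}, Int<3>{}), [](auto a) {
//     if constexpr (decltype(a)::value % 2 == 1) { return cute::make_tuple(a); }
//     else                                       { return cute::make_tuple();  }
//   });
//   // r == (Int<1>{}, Int<3>{})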
//
// Fold (Reduce, Accumulate)
// (t, v, f) => f(...f(f(v,t_0),t_1),...,t_n)
//
namespace detail {
// This impl compiles much faster than cute::apply and variadic args
template <class T, class V, class F>
CUTE_HOST_DEVICE constexpr
auto
fold(T&&, V&& v, F&&, seq<>)
{
return v;
}
template <class T, class V, class F, int I0>
CUTE_HOST_DEVICE constexpr
auto
fold(T&& t, V&& v, F&& f, seq<I0>)
{
return f(static_cast<V&&>(v), get<I0>(static_cast<T&&>(t)));
}
template <class T, class V, class F, int I0, int I1>
CUTE_HOST_DEVICE constexpr
auto
fold(T&& t, V&& v, F&& f, seq<I0,I1>)
{
return f(f(static_cast<V&&>(v), get<I0>(static_cast<T&&>(t))), get<I1>(static_cast<T&&>(t)));
}
template <class T, class V, class F, int I0, int I1, int I2>
CUTE_HOST_DEVICE constexpr
auto
fold(T&& t, V&& v, F&& f, seq<I0,I1,I2>)
{
return f(f(f(static_cast<V&&>(v), get<I0>(static_cast<T&&>(t))), get<I1>(static_cast<T&&>(t))), get<I2>(static_cast<T&&>(t)));
}
template <class T, class V, class F, int I0, int I1, int I2, int I3>
CUTE_HOST_DEVICE constexpr
auto
fold(T&& t, V&& v, F&& f, seq<I0,I1,I2,I3>)
{
return f(f(f(f(static_cast<V&&>(v), get<I0>(static_cast<T&&>(t))), get<I1>(static_cast<T&&>(t))), get<I2>(static_cast<T&&>(t))), get<I3>(static_cast<T&&>(t)));
}
template <class T, class V, class F, int I0, int I1, int I2, int I3, int... Is>
CUTE_HOST_DEVICE constexpr
auto
fold(T&& t, V&& v, F&& f, seq<I0,I1,I2,I3,Is...>)
{
return fold(static_cast<T&&>(t),
f(f(f(f(static_cast<V&&>(v), get<I0>(static_cast<T&&>(t))), get<I1>(static_cast<T&&>(t))), get<I2>(static_cast<T&&>(t))), get<I3>(static_cast<T&&>(t))),
f,
seq<Is...>{});
}
} // end namespace detail
template <class T, class V, class F>
CUTE_HOST_DEVICE constexpr
auto
fold(T&& t, V&& v, F&& f)
{
if constexpr (is_tuple<remove_cvref_t<T>>::value) {
return detail::fold(static_cast<T&&>(t),
static_cast<V&&>(v),
f,
tuple_seq<T>{});
} else {
return f(static_cast<V&&>(v), static_cast<T&&>(t));
}
CUTE_GCC_UNREACHABLE;
}
template <class T, class F>
CUTE_HOST_DEVICE constexpr
auto
fold_first(T&& t, F&& f)
{
if constexpr (is_tuple<remove_cvref_t<T>>::value) {
return detail::fold(static_cast<T&&>(t),
get<0>(static_cast<T&&>(t)),
f,
make_range<1,tuple_size<remove_cvref_t<T>>::value>{});
} else {
return t;
}
CUTE_GCC_UNREACHABLE;
}
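// Illustrative usage (a sketch, Int<> as above):
//   cute::fold(cute::make_tuple(Int<1>{}, Int<2>{}, Int<3>{}), Int<0>{},
//              [](auto acc, auto x) { return acc + x; });                 // Int<6>{}
//   cute::fold_first(cute::make_tuple(2, 3, 4),
//                    [](auto acc, auto x) { return acc * x; });           // 24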
//
// front, back, take, select, unwrap
//
// Get the first non-tuple element in a hierarchical tuple
template <class T>
CUTE_HOST_DEVICE constexpr
decltype(auto)
front(T&& t)
{
if constexpr (is_tuple<remove_cvref_t<T>>::value) {
return front(get<0>(static_cast<T&&>(t)));
} else {
return static_cast<T&&>(t);
}
CUTE_GCC_UNREACHABLE;
}
// Get the last non-tuple element in a hierarchical tuple
template <class T>
CUTE_HOST_DEVICE constexpr
decltype(auto)
back(T&& t)
{
if constexpr (is_tuple<remove_cvref_t<T>>::value) {
constexpr int N = tuple_size<remove_cvref_t<T>>::value;
// MSVC needs a bit of extra help here deducing return types.
// We help it by peeling off the nonrecursive case a level "early."
if constexpr (! is_tuple<remove_cvref_t<decltype(get<N - 1>(static_cast<T&&>(t)))>>::value) {
return get<N - 1>(static_cast<T&&>(t));
} else {
return back(get<N - 1>(static_cast<T&&>(t)));
}
} else {
return static_cast<T&&>(t);
}
CUTE_GCC_UNREACHABLE;
}
// Takes the elements in the range [B,E)
template <int B, int E, class T>
CUTE_HOST_DEVICE constexpr
auto
take(T const& t)
{
return detail::apply(t, [](auto const&... a) { return cute::make_tuple(a...); }, make_range<B,E>{});
}
//
// Select tuple elements with given indices.
//
template <int... I, class T>
CUTE_HOST_DEVICE constexpr
auto
select(T const& t)
{
return cute::make_tuple(get<I>(t)...);
}
template <class T, class Indices>
CUTE_HOST_DEVICE constexpr
auto
select(T const& t, Indices const& indices)
{
if constexpr (is_tuple<Indices>::value) {
return cute::transform(indices, [&t](auto i) { return select(t, i); });
} else {
static_assert(is_static<Indices>::value, "Order must be static");
return get<Indices::value>(t);
}
}
// Wrap non-tuples into rank-1 tuples or forward
template <class T>
CUTE_HOST_DEVICE constexpr
auto
wrap(T const& t)
{
if constexpr (is_tuple<T>::value) {
return t;
} else {
return cute::make_tuple(t);
}
CUTE_GCC_UNREACHABLE;
}
// Unwrap rank-1 tuples until we're left with a rank>1 tuple or a non-tuple
template <class T>
CUTE_HOST_DEVICE constexpr
auto
unwrap(T const& t)
{
if constexpr (is_tuple<T>::value) {
if constexpr (tuple_size<T>::value == 1) {
return unwrap(get<0>(t));
} else {
return t;
}
} else {
return t;
}
CUTE_GCC_UNREACHABLE;
}
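// Illustrative usage (a sketch, Int<> as above):
//   auto t = cute::make_tuple(Int<1>{}, cute::make_tuple(Int<2>{}, Int<3>{}), Int<4>{});
//   cute::front(t);                                              // Int<1>{}
//   cute::back(t);                                               // Int<4>{}
//   cute::take<0,2>(t);                                          // (Int<1>{}, (Int<2>{}, Int<3>{}))
//   cute::select<2,0>(t);                                        // (Int<4>{}, Int<1>{})
//   cute::unwrap(cute::make_tuple(cute::make_tuple(Int<7>{})));  // Int<7>{}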
//
// Flatten and Unflatten
//
template <class T>
struct is_flat : true_type {};
template <class... Ts>
struct is_flat<tuple<Ts...>> : bool_constant<(true && ... && (not is_tuple<Ts>::value))> {};
// Flatten a hierarchical tuple to a tuple of depth one
// and wrap non-tuples into a rank-1 tuple.
template <class T>
CUTE_HOST_DEVICE constexpr
auto
flatten_to_tuple(T const& t)
{
if constexpr (is_tuple<T>::value) {
if constexpr (is_flat<T>::value) { // Shortcut for perf
return t;
} else {
return filter_tuple(t, [](auto const& a) { return flatten_to_tuple(a); });
}
} else {
return cute::make_tuple(t);
}
CUTE_GCC_UNREACHABLE;
}
// Flatten a hierarchical tuple to a tuple of depth one
// and leave non-tuple untouched.
template <class T>
CUTE_HOST_DEVICE constexpr
auto
flatten(T const& t)
{
if constexpr (is_tuple<T>::value) {
if constexpr (is_flat<T>::value) { // Shortcut for perf
return t;
} else {
return filter_tuple(t, [](auto const& a) { return flatten_to_tuple(a); });
}
} else {
return t;
}
CUTE_GCC_UNREACHABLE;
}
namespace detail {
template <class FlatTuple, class TargetProfile>
CUTE_HOST_DEVICE constexpr
auto
unflatten_impl(FlatTuple const& flat_tuple, TargetProfile const& target_profile)
{
if constexpr (is_tuple<TargetProfile>::value) {
return fold(target_profile, cute::make_tuple(cute::make_tuple(), flat_tuple), [](auto const& v, auto const& t) {
auto [result, remaining_tuple] = v;
auto [sub_result, sub_tuple] = unflatten_impl(remaining_tuple, t);
return cute::make_tuple(append(result, sub_result), sub_tuple);
});
} else {
return cute::make_tuple(get<0>(flat_tuple), take<1, decltype(rank(flat_tuple))::value>(flat_tuple));
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
// Unflatten a flat tuple into a hierarchical tuple
// @pre flatten(@a flat_tuple) == @a flat_tuple
// @pre rank(flatten(@a target_profile)) == rank(@a flat_tuple)
// @post congruent(@a result, @a target_profile)
// @post flatten(@a result) == @a flat_tuple
template <class FlatTuple, class TargetProfile>
CUTE_HOST_DEVICE constexpr
auto
unflatten(FlatTuple const& flat_tuple, TargetProfile const& target_profile)
{
auto [unflatten_tuple, flat_remainder] = detail::unflatten_impl(flat_tuple, target_profile);
CUTE_STATIC_ASSERT_V(rank(flat_remainder) == Int<0>{});
return unflatten_tuple;
}
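// Illustrative usage (a sketch, Int<> as above): flatten and unflatten are
// inverses given the original hierarchy as the target profile:
//   auto hier = cute::make_tuple(Int<1>{}, cute::make_tuple(Int<2>{}, Int<3>{}));
//   auto flat = cute::flatten(hier);        // (Int<1>{}, Int<2>{}, Int<3>{})
//   cute::unflatten(flat, hier);            // (Int<1>{}, (Int<2>{}, Int<3>{}))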
//
// insert and remove and replace
//
namespace detail {
// Shortcut around cute::tuple_cat for common insert/remove/repeat cases
template <class T, class X, int... I, int... J, int... K>
CUTE_HOST_DEVICE constexpr
auto
construct(T const& t, X const& x, seq<I...>, seq<J...>, seq<K...>)
{
return cute::make_tuple(get<I>(t)..., (void(J),x)..., get<K>(t)...);
}
} // end namespace detail
// Insert x into the Nth position of the tuple
template <int N, class T, class X>
CUTE_HOST_DEVICE constexpr
auto
insert(T const& t, X const& x)
{
return detail::construct(t, x, make_seq<N>{}, seq<0>{}, make_range<N,tuple_size<T>::value>{});
}
// Remove the Nth element of the tuple
template <int N, class T>
CUTE_HOST_DEVICE constexpr
auto
remove(T const& t)
{
return detail::construct(t, 0, make_seq<N>{}, seq<>{}, make_range<N+1,tuple_size<T>::value>{});
}
// Replace the Nth element of the tuple with x
template <int N, class T, class X>
CUTE_HOST_DEVICE constexpr
auto
replace(T const& t, X const& x)
{
if constexpr (is_tuple<T>::value) {
return detail::construct(t, x, make_seq<N>{}, seq<0>{}, make_range<N+1,tuple_size<T>::value>{});
} else {
static_assert(N == 0);
return x;
}
CUTE_GCC_UNREACHABLE;
}
// Replace the first element of the tuple with x
template <class T, class X>
CUTE_HOST_DEVICE constexpr
auto
replace_front(T const& t, X const& x)
{
if constexpr (is_tuple<T>::value) {
return detail::construct(t, x, seq<>{}, seq<0>{}, make_range<1,tuple_size<T>::value>{});
} else {
return x;
}
CUTE_GCC_UNREACHABLE;
}
// Replace the last element of the tuple with x
template <class T, class X>
CUTE_HOST_DEVICE constexpr
auto
replace_back(T const& t, X const& x)
{
if constexpr (is_tuple<T>::value) {
return detail::construct(t, x, make_seq<tuple_size<T>::value-1>{}, seq<0>{}, seq<>{});
} else {
return x;
}
CUTE_GCC_UNREACHABLE;
}
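// Illustrative usage (a sketch, Int<> as above):
//   auto t = cute::make_tuple(Int<1>{}, Int<2>{}, Int<3>{});
//   cute::insert<1>(t, Int<9>{});       // (Int<1>{}, Int<9>{}, Int<2>{}, Int<3>{})
//   cute::remove<1>(t);                 // (Int<1>{}, Int<3>{})
//   cute::replace<2>(t, Int<7>{});      // (Int<1>{}, Int<2>{}, Int<7>{})
//   cute::replace_front(t, Int<0>{});   // (Int<0>{}, Int<2>{}, Int<3>{})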
//
// Make a tuple of Xs of tuple_size N
//
template <int N, class X>
CUTE_HOST_DEVICE constexpr
auto
tuple_repeat(X const& x)
{
return detail::construct(0, x, seq<>{}, make_seq<N>{}, seq<>{});
}
//
// Make repeated Xs of rank N
//
template <int N, class X>
CUTE_HOST_DEVICE constexpr
auto
repeat(X const& x)
{
if constexpr (N == 1) {
return x;
} else {
return detail::construct(0, x, seq<>{}, make_seq<N>{}, seq<>{});
}
CUTE_GCC_UNREACHABLE;
}
//
// Make a tuple of Xs the same profile as tuple T
//
template <class T, class X>
CUTE_HOST_DEVICE constexpr
auto
repeat_like(T const& t, X const& x)
{
if constexpr (is_tuple<T>::value) {
return transform(t, [&](auto const& a) { return repeat_like(a,x); });
} else {
return x;
}
CUTE_GCC_UNREACHABLE;
}
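// Illustrative usage (a sketch, Int<> as above):
//   cute::tuple_repeat<3>(Int<1>{});    // (Int<1>{}, Int<1>{}, Int<1>{})
//   cute::repeat<1>(Int<1>{});          // Int<1>{} (rank-1 results stay unwrapped)
//   cute::repeat_like(cute::make_tuple(Int<2>{}, cute::make_tuple(Int<4>{}, Int<8>{})), Int<0>{});
//   // => (Int<0>{}, (Int<0>{}, Int<0>{}))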
// Group the elements [B,E) of a T into a single element
// e.g. group<2,4>(T<_1,_2,_3,_4,_5,_6>{})
// => T<_1,_2,T<_3,_4>,_5,_6>{}
template <int B, int E, class T>
CUTE_HOST_DEVICE constexpr
auto
group(T const& t)
{
if constexpr (not is_tuple<T>::value) {
if constexpr (E == -1) {
return group<B,1>(t);
} else {
return detail::construct(t, take<B,E>(t), make_seq<B>{}, make_seq<(B < E)>{}, make_range<E,1>{});
}
} else
if constexpr (E == -1) {
return group<B,tuple_size<T>::value>(t);
} else
if constexpr (B <= E) {
return detail::construct(t, take<B,E>(t), make_seq<B>{}, make_seq<(B < E)>{}, make_range<E,tuple_size<T>::value>{});
} else {
static_assert(B <= E);
}
CUTE_GCC_UNREACHABLE;
}
//
// Extend a T to rank N by appending/prepending an element
//
template <int N, class T, class X>
CUTE_HOST_DEVICE constexpr
auto
append(T const& a, X const& x)
{
if constexpr (is_tuple<T>::value) {
if constexpr (N == tuple_size<T>::value) {
return a;
} else {
static_assert(N > tuple_size<T>::value);
return detail::construct(a, x, make_seq<tuple_size<T>::value>{}, make_seq<N-tuple_size<T>::value>{}, seq<>{});
}
} else {
if constexpr (N == 1) {
return a;
} else {
return detail::construct(cute::make_tuple(a), x, seq<0>{}, make_seq<N-1>{}, seq<>{});
}
}
CUTE_GCC_UNREACHABLE;
}
template <class T, class X>
CUTE_HOST_DEVICE constexpr
auto
append(T const& a, X const& x)
{
if constexpr (is_tuple<T>::value) {
return detail::construct(a, x, make_seq<tuple_size<T>::value>{}, seq<0>{}, seq<>{});
} else {
return cute::make_tuple(a, x);
}
CUTE_GCC_UNREACHABLE;
}
template <int N, class T, class X>
CUTE_HOST_DEVICE constexpr
auto
prepend(T const& a, X const& x)
{
if constexpr (is_tuple<T>::value) {
if constexpr (N == tuple_size<T>::value) {
return a;
} else {
static_assert(N > tuple_size<T>::value);
return detail::construct(a, x, seq<>{}, make_seq<N-tuple_size<T>::value>{}, make_seq<tuple_size<T>::value>{});
}
} else {
if constexpr (N == 1) {
return a;
} else {
static_assert(N > 1);
return detail::construct(cute::make_tuple(a), x, seq<>{}, make_seq<N-1>{}, seq<0>{});
}
}
CUTE_GCC_UNREACHABLE;
}
template <class T, class X>
CUTE_HOST_DEVICE constexpr
auto
prepend(T const& a, X const& x)
{
if constexpr (is_tuple<T>::value) {
return detail::construct(a, x, seq<>{}, seq<0>{}, make_seq<tuple_size<T>::value>{});
} else {
return cute::make_tuple(x, a);
}
CUTE_GCC_UNREACHABLE;
}
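// Illustrative usage (a sketch, Int<> as above):
//   auto t = cute::make_tuple(Int<1>{}, Int<2>{});
//   cute::append(t, Int<3>{});          // (Int<1>{}, Int<2>{}, Int<3>{})
//   cute::append<4>(t, Int<0>{});       // (Int<1>{}, Int<2>{}, Int<0>{}, Int<0>{})
//   cute::prepend(t, Int<0>{});         // (Int<0>{}, Int<1>{}, Int<2>{})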
//
// Inclusive scan (prefix sum)
//
namespace detail {
template <class T, class V, class F, int I, int... Is>
CUTE_HOST_DEVICE constexpr
auto
iscan(T const& t, V const& v, F&& f, seq<I,Is...>)
{
// Apply the function to v and the element at I
auto v_next = f(v, get<I>(t));
// Replace I with v_next
auto t_next = replace<I>(t, v_next);
#if 0
std::cout << "ISCAN i" << I << std::endl;
std::cout << " t " << t << std::endl;
std::cout << " i " << v << std::endl;
std::cout << " f(i,t) " << v_next << std::endl;
std::cout << " t_n " << t_next << std::endl;
#endif
if constexpr (sizeof...(Is) == 0) {
return t_next;
} else {
return iscan(t_next, v_next, f, seq<Is...>{});
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
template <class T, class V, class F>
CUTE_HOST_DEVICE constexpr
auto
iscan(T const& t, V const& v, F&& f)
{
return detail::iscan(t, v, f, tuple_seq<T>{});
}
//
// Exclusive scan (prefix sum)
//
namespace detail {
template <class T, class V, class F, int I, int... Is>
CUTE_HOST_DEVICE constexpr
auto
escan(T const& t, V const& v, F&& f, seq<I,Is...>)
{
if constexpr (sizeof...(Is) == 0) {
// Replace I with v
return replace<I>(t, v);
} else {
// Apply the function to v and the element at I
auto v_next = f(v, get<I>(t));
// Replace I with v
auto t_next = replace<I>(t, v);
#if 0
std::cout << "ESCAN i" << I << std::endl;
std::cout << " t " << t << std::endl;
std::cout << " i " << v << std::endl;
std::cout << " f(i,t) " << v_next << std::endl;
std::cout << " t_n " << t_next << std::endl;
#endif
// Recurse
return escan(t_next, v_next, f, seq<Is...>{});
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
template <class T, class V, class F>
CUTE_HOST_DEVICE constexpr
auto
escan(T const& t, V const& v, F&& f)
{
return detail::escan(t, v, f, tuple_seq<T>{});
}
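// Illustrative usage (a sketch, Int<> as above): both scans return a tuple of
// the same rank as the input.
//   auto t    = cute::make_tuple(Int<1>{}, Int<2>{}, Int<3>{});
//   auto plus = [](auto a, auto b) { return a + b; };
//   cute::iscan(t, Int<0>{}, plus);     // (Int<1>{}, Int<3>{}, Int<6>{})
//   cute::escan(t, Int<0>{}, plus);     // (Int<0>{}, Int<1>{}, Int<3>{})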
//
// Zip (Transpose)
//
// Take ((a,b,c,...),(x,y,z,...),...) rank-R0 x rank-R1 input
// to produce ((a,x,...),(b,y,...),(c,z,...),...) rank-R1 x rank-R0 output
namespace detail {
template <int J, class... Ts>
CUTE_HOST_DEVICE constexpr
auto
zip_(Ts const&... ts)
{
return cute::make_tuple(get<J>(ts)...);
}
template <class T, int... Is, int... Js>
CUTE_HOST_DEVICE constexpr
auto
zip(T const& t, seq<Is...>, seq<Js...>)
{
static_assert(conjunction<bool_constant<tuple_size<tuple_element_t<0,T>>::value == tuple_size<tuple_element_t<Is,T>>::value>...>::value, "Mismatched Ranks");
return cute::make_tuple(zip_<Js>(get<Is>(t)...)...);
}
} // end namespace detail
template <class T>
CUTE_HOST_DEVICE constexpr
auto
zip(T const& t)
{
if constexpr (is_tuple<T>::value) {
if constexpr (is_tuple<tuple_element_t<0,T>>::value) {
return detail::zip(t, tuple_seq<T>{}, tuple_seq<tuple_element_t<0,T>>{});
} else {
return cute::make_tuple(t);
}
} else {
return t;
}
CUTE_GCC_UNREACHABLE;
}
// Convenience overload: pass the tuples in separately
template <class T0, class T1, class... Ts>
CUTE_HOST_DEVICE constexpr
auto
zip(T0 const& t0, T1 const& t1, Ts const&... ts)
{
return zip(cute::make_tuple(t0, t1, ts...));
}
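// Illustrative usage (a sketch, Int<> as above):
//   auto a = cute::make_tuple(Int<1>{}, Int<2>{});
//   auto b = cute::make_tuple(Int<3>{}, Int<4>{});
//   cute::zip(a, b);   // ((Int<1>{}, Int<3>{}), (Int<2>{}, Int<4>{}))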
//
// zip2_by -- A guided zip for rank-2 tuples
// Take a tuple like ((A,a),((B,b),(C,c)),d)
// and produce a tuple ((A,(B,C)),(a,(b,c),d))
// where the rank-2 modes are selected by the terminals of the guide (X,(X,X))
//
namespace detail {
template <class T, class TG, int... Is, int... Js>
CUTE_HOST_DEVICE constexpr
auto
zip2_by(T const& t, TG const& guide, seq<Is...>, seq<Js...>)
{
// zip2_by produces the modes like ((A,a),(B,b),...)
auto split = cute::make_tuple(zip2_by(get<Is>(t), get<Is>(guide))...);
// Rearrange and append missing modes from t to make ((A,B,...),(a,b,...,x,y))
return cute::make_tuple(cute::make_tuple(get<0>(get<Is>(split))...),
cute::make_tuple(get<1>(get<Is>(split))..., get<Js>(t)...));
}
} // end namespace detail
template <class T, class TG>
CUTE_HOST_DEVICE constexpr
auto
zip2_by(T const& t, TG const& guide)
{
if constexpr (is_tuple<TG>::value) {
constexpr int TR = tuple_size<T>::value;
constexpr int GR = tuple_size<TG>::value;
static_assert(TR >= GR, "Mismatched ranks");
return detail::zip2_by(t, guide,
make_range< 0, GR>{},
make_range<GR, TR>{});
} else {
static_assert(tuple_size<T>::value == 2, "Mismatched ranks");
return t;
}
CUTE_GCC_UNREACHABLE;
}
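// Illustrative usage (a sketch, Int<> as above), matching the ((A,a),((B,b),(C,c)),d)
// example above; the guide's terminal values are ignored, only its shape matters:
//   auto t     = cute::make_tuple(cute::make_tuple(Int<1>{}, Int<2>{}),
//                                 cute::make_tuple(cute::make_tuple(Int<3>{}, Int<4>{}),
//                                                  cute::make_tuple(Int<5>{}, Int<6>{})),
//                                 Int<7>{});
//   auto guide = cute::make_tuple(Int<0>{}, cute::make_tuple(Int<0>{}, Int<0>{}));
//   cute::zip2_by(t, guide);
//   // => ((Int<1>{}, (Int<3>{}, Int<5>{})), (Int<2>{}, (Int<4>{}, Int<6>{}), Int<7>{}))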
/// @return A tuple of the elements of @c t in reverse order.
template <class T>
CUTE_HOST_DEVICE constexpr
auto
reverse(T const& t)
{
if constexpr (is_tuple<T>::value) {
return detail::apply(t, [](auto const&... a){ return cute::make_tuple(a...); }, tuple_rseq<T>{});
} else {
return t;
}
}
} // end namespace cute
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/mma.hpp>
// Config
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) && defined(__CUDA_ARCH_FEAT_SM90_ALL))
# define CUTE_ARCH_MMA_SM90A_ENABLED
#endif
namespace cute {
////////////////////////////////////////////////////////////////////////////////////////////////////
// Warpgroup sync primitives
CUTE_HOST_DEVICE
void
warpgroup_arrive()
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile ("wgmma.fence.sync.aligned;\n" ::: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use wgmma.fence without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
template <int N>
CUTE_HOST_DEVICE
void
warpgroup_wait()
{
static_assert(N >= 0 && N <= 7, "WGMMA wait: N must be in range [0, 7]");
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile("wgmma.wait_group.sync.aligned %0;\n" :: "n"(N) : "memory");
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use wgmma.wait_group<N> without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
// Marks the commit point: commits all previously issued, uncommitted warpgroup MMAs as one batch (wgmma.commit_group).
CUTE_HOST_DEVICE
void
warpgroup_commit_batch()
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile("wgmma.commit_group.sync.aligned;\n" ::: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use wgmma.commit_group without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
CUTE_HOST_DEVICE
void
warpgroup_fence_operand(uint32_t& reg) {
// MSVC emits a build error for 'asm volatile'
// even if it only occurs in a __device__ function.
// This prevents the error.
#if defined(__CUDA_ARCH__)
asm volatile("" : "+r"(reg) :: "memory");
#endif
}
CUTE_HOST_DEVICE
void
warpgroup_fence_operand(float& reg) {
#if defined(__CUDA_ARCH__)
asm volatile("" : "+f"(reg) :: "memory");
#endif
}
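// Illustrative ordering of these primitives around a batch of wgmma operations
// (a sketch of typical use, not mandated by this header; `acc` stands for the
// caller's accumulator registers):
//   cute::warpgroup_fence_operand(acc);   // fence accumulator registers
//   cute::warpgroup_arrive();             // wgmma.fence before issuing wgmma ops
//   /* issue one or more wgmma.mma_async operations here */
//   cute::warpgroup_commit_batch();       // wgmma.commit_group
//   cute::warpgroup_wait<0>();            // wait for all committed batches
//   cute::warpgroup_fence_operand(acc);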
namespace GMMA {
enum class Major {
K = 0,
MN = 1
};
enum class ScaleOut {
Zero = 0,
One = 1
};
enum class ScaleIn {
Neg = -1,
One = 1
};
} // namespace GMMA
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA PTX definitions: C = (scaleA * A) * (scaleB * B) + (scaleD * C)
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %4, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k16.f16.f16.f16 "
"{%0, %1},"
" %2,"
" %3,"
" p, %5, %6, %7, %8;\n"
"}\n"
: "+r"(d0), "+r"(d1)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
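// Illustrative direct invocation (a sketch; in practice these structs are
// consumed through CuTe's MMA atoms rather than called by hand). Here desc_a
// and desc_b stand for 64-bit shared-memory matrix descriptors and d0/d1 for
// the caller's accumulator registers (all hypothetical names):
//   using MMA = cute::SM90_64x8x16_F16F16F16_SS<cute::GMMA::Major::K, cute::GMMA::Major::K>;
//   MMA::fma(desc_a, desc_b, d0, d1, cute::GMMA::ScaleOut::One);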
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[2];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %7, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k16.f16.f16.f16 "
"{%0, %1},"
"{%2, %3, %4, %5},"
" %6,"
" p, %8, %9, %10;\n"
"}\n"
: "+r"(d0), "+r"(d1)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k16.f16.f16.f16 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p, %7, %8, %9, %10;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k16.f16.f16.f16 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p, %10, %11, %12;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p, %11, %12, %13, %14;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p, %14, %15, %16;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[12];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %14, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11},"
" %12,"
" %13,"
" p, %15, %16, %17, %18;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[12];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %17, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11},"
"{%12, %13, %14, %15},"
" %16,"
" p, %18, %19, %20;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p, %19, %20, %21, %22;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p, %22, %23, %24;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[20];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %22, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19},"
" %20,"
" %21,"
" p, %23, %24, %25, %26;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[20];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %25, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19},"
"{%20, %21, %22, %23},"
" %24,"
" p, %26, %27, %28;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p, %27, %28, %29, %30;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p, %30, %31, %32;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[28];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %30, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27},"
" %28,"
" %29,"
" p, %31, %32, %33, %34;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[28];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %33, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27},"
"{%28, %29, %30, %31},"
" %32,"
" p, %34, %35, %36;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p, %35, %36, %37, %38;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p, %38, %39, %40;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[36];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %38, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35},"
" %36,"
" %37,"
" p, %39, %40, %41, %42;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[36];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %41, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35},"
"{%36, %37, %38, %39},"
" %40,"
" p, %42, %43, %44;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p, %43, %44, %45, %46;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p, %46, %47, %48;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[44];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %46, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43},"
" %44,"
" %45,"
" p, %47, %48, %49, %50;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[44];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %49, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43},"
"{%44, %45, %46, %47},"
" %48,"
" p, %50, %51, %52;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p, %51, %52, %53, %54;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p, %54, %55, %56;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[52];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %54, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51},"
" %52,"
" %53,"
" p, %55, %56, %57, %58;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[52];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %57, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51},"
"{%52, %53, %54, %55},"
" %56,"
" p, %58, %59, %60;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p, %59, %60, %61, %62;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p, %62, %63, %64;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[60];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %62, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59},"
" %60,"
" %61,"
" p, %63, %64, %65, %66;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[60];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %65, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59},"
"{%60, %61, %62, %63},"
" %64,"
" p, %66, %67, %68;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
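// n256 is the widest wgmma tile in this family. Its f16 accumulator alone
// occupies 64 32-bit registers per thread (an f32 accumulator of the same
// shape would need 128), so kernels instantiating it must budget registers
// accordingly.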
// GMMA 64x256x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x16_F16F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p, %67, %68, %69, %70;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x16_F16F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x16 F16+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x16_F16F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k16.f16.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p, %70, %71, %72;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x16_F16F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
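// From here on the accumulator switches from packed f16 to f32: CRegisters
// become `float` (note the "+f" asm constraints), and each thread now holds
// N/2 accumulator registers instead of N/4, since f32 values are no longer
// packed two-per-register. The A/B inputs and the descriptor operands are
// unchanged.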
// GMMA 64x8x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k16.f32.f16.f16 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p, %7, %8, %9, %10;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
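// Illustrative usage of the atom above (a minimal sketch, not part of this
// header): one warpgroup of 128 threads issues the MMA between the wgmma
// fence/commit/wait instructions that the PTX ISA requires around
// `wgmma.mma_async`. The descriptor builders `make_gmma_desc_a`/`_b` are
// hypothetical placeholders for however the caller encodes its shared-memory
// matrix descriptors (base address, leading-dimension/stride offsets, swizzle).
//
//   float d0 = 0.f, d1 = 0.f, d2 = 0.f, d3 = 0.f;
//   uint64_t desc_a = make_gmma_desc_a(smem_A);   // hypothetical
//   uint64_t desc_b = make_gmma_desc_b(smem_B);   // hypothetical
//   asm volatile("wgmma.fence.sync.aligned;\n" ::: "memory");
//   SM90_64x8x16_F32F16F16_SS<GMMA::Major::K, GMMA::Major::K>::fma(
//       desc_a, desc_b, d0, d1, d2, d3, GMMA::ScaleOut::Zero);
//   asm volatile("wgmma.commit_group.sync.aligned;\n" ::: "memory");
//   asm volatile("wgmma.wait_group.sync.aligned 0;\n" ::: "memory");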
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k16.f32.f16.f16 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p, %10, %11, %12;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p, %11, %12, %13, %14;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p, %14, %15, %16;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p, %19, %20, %21, %22;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p, %22, %23, %24;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p, %27, %28, %29, %30;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p, %30, %31, %32;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p, %35, %36, %37, %38;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p, %38, %39, %40;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
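// Illustrative sketch only (not part of this header): how one of the _SS atoms above might be
// invoked directly from device code, assuming `desc_a` and `desc_b` are already-constructed GMMA
// shared-memory descriptors and `acc` is this thread's 32-float slice of the 64x64 accumulator.
// The whole warpgroup (128 threads) must execute the call. In practice these atoms are consumed
// through CuTe's MMA_Atom / TiledMMA machinery rather than called by hand.
//
//   float acc[32] = {};
//   cute::warpgroup_arrive();                 // wgmma.fence before issuing
//   cute::SM90_64x64x16_F32F16F16_SS<cute::GMMA::Major::K, cute::GMMA::Major::K>::fma(
//       desc_a, desc_b,
//       acc[ 0], acc[ 1], acc[ 2], acc[ 3], acc[ 4], acc[ 5], acc[ 6], acc[ 7],
//       acc[ 8], acc[ 9], acc[10], acc[11], acc[12], acc[13], acc[14], acc[15],
//       acc[16], acc[17], acc[18], acc[19], acc[20], acc[21], acc[22], acc[23],
//       acc[24], acc[25], acc[26], acc[27], acc[28], acc[29], acc[30], acc[31],
//       cute::GMMA::ScaleOut::Zero);          // Zero: overwrite D, One: accumulate
//   cute::warpgroup_commit_batch();           // wgmma.commit_group
//   cute::warpgroup_wait<0>();                // block until the committed batch retires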
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p, %43, %44, %45, %46;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p, %46, %47, %48;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p, %51, %52, %53, %54;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p, %54, %55, %56;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p, %59, %60, %61, %62;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p, %62, %63, %64;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p, %67, %68, %69, %70;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p, %70, %71, %72;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p, %75, %76, %77, %78;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p, %78, %79, %80;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p, %83, %84, %85, %86;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p, %86, %87, %88;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p, %91, %92, %93, %94;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p, %94, %95, %96;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p, %99, %100, %101, %102;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p, %102, %103, %104;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
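// As N grows, the per-thread accumulator fragment grows as N/2 floats (64*N fp32 values spread
// over the 128 threads of the warpgroup), e.g. float[96] for N=192 below and float[104] for
// N=208; larger-N atoms therefore trade register pressure for fewer issued instructions.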
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p, %107, %108, %109, %110;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p, %110, %111, %112;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p, %115, %116, %117, %118;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p, %118, %119, %120;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p, %123, %124, %125, %126;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p, %126, %127, %128;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x16_F32F16F16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p, %131, %132, %133, %134;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x16_F32F16F16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
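// Note on the SS operand encoding above: the 128 "+f" bindings are the f32 accumulator
// fragment (read-modify-write), the two "l" bindings are 64-bit shared-memory matrix
// descriptors for A and B, and scale_D is passed as a runtime register that the setp
// turns into the predicate controlling accumulation (D = A*B + D when nonzero,
// D = A*B when zero). scaleA/scaleB (+1/-1 negation of the inputs) and tnspA/tnspB
// (K-major vs. MN-major selection) are template parameters bound as "n" immediates,
// so they must be compile-time constants.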
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x16 F32+=F16*F16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x16_F32F16F16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k16.f32.f16.f16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p, %134, %135, %136;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x16_F32F16F16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
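// Note on the RS variants: operand A is sourced from four 32-bit registers per thread
// (ARegisters = uint32_t[4]) instead of a shared-memory descriptor, which is why the
// static_assert above requires a K-major A layout and why only tnspB appears among the
// "n" immediates; operand B is still supplied through a 64-bit descriptor, and the
// accumulator and scale/transpose handling is otherwise identical to the SS forms.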
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k16.f32.bf16.bf16 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p, %7, %8, %9, %10;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
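// Illustrative usage sketch (assumptions, not part of the original interface): the call
// below exercises the smallest BF16 tile above directly, assuming desc_a and desc_b are
// valid GMMA shared-memory matrix descriptors, the surrounding cute/GMMA namespaces are
// in scope, and the caller wraps the asynchronous MMA in the usual warpgroup fencing
// (e.g. warpgroup_arrive / warpgroup_commit_batch / warpgroup_wait). In practice these
// ops are consumed through CuTe's MMA atom machinery rather than invoked by hand.
//
//   float acc[4] = {0.f, 0.f, 0.f, 0.f};                     // f32 accumulator fragment
//   SM90_64x8x16_F32BF16BF16_SS<GMMA::Major::K, GMMA::Major::K>::fma(
//       desc_a, desc_b,
//       acc[0], acc[1], acc[2], acc[3],
//       GMMA::ScaleOut::One);  // ScaleOut::Zero would overwrite D instead of accumulating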
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k16.f32.bf16.bf16 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p, %10, %11, %12;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p, %11, %12, %13, %14;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p, %14, %15, %16;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p, %19, %20, %21, %22;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p, %22, %23, %24;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p, %27, %28, %29, %30;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p, %30, %31, %32;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p, %35, %36, %37, %38;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p, %38, %39, %40;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p, %43, %44, %45, %46;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p, %46, %47, %48;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p, %51, %52, %53, %54;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
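// Illustrative use of the SS atom above — a sketch only, not part of the generated interface.
// Descriptors and accumulator fragments are normally produced by CuTe's MMA-atom machinery, and the
// warpgroup helper names (warpgroup_arrive / warpgroup_commit_batch / warpgroup_wait) are the ones
// CuTe provides for the wgmma fence/commit/wait sequence; they are assumed here for illustration.
//
//   float d[48] = {};                                  // per-thread accumulator fragment (N/2 = 48)
//   cute::warpgroup_arrive();                          // wgmma.fence before issuing the MMA
//   SM90_64x96x16_F32BF16BF16_SS<GMMA::Major::K, GMMA::Major::K>::fma(
//       desc_a, desc_b,
//       d[0], d[1], /* ...abbreviated... */ d[46], d[47],
//       GMMA::ScaleOut::One);                          // accumulate into d
//   cute::warpgroup_commit_batch();
//   cute::warpgroup_wait<0>();                         // wait for the committed wgmma batch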
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p, %54, %55, %56;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p, %59, %60, %61, %62;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p, %62, %63, %64;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
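// Inline-asm constraint legend for these wrappers: "+f" marks a read-write 32-bit float register
// (an accumulator fragment), "l" a 64-bit register holding a matrix descriptor, "r" a 32-bit
// register (the runtime scale_D, or an A fragment in the RS atoms), and "n" an immediate — the
// compile-time scaleA/scaleB/tnspA/tnspB template parameters are folded straight into the
// instruction encoding. The "setp.ne.b32 p, %k, 0" prologue converts the runtime scale_D integer
// into the predicate operand that wgmma.mma_async expects.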
// GMMA 64x128x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p, %67, %68, %69, %70;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p, %70, %71, %72;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
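// Note: shapes wrapped in CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED (64x80, 64x112, 64x144, ... in this
// file) are opt-in and compile only when that macro is defined, whereas the unguarded shapes
// (e.g. 64x96x16, 64x128x16, 64x192x16) are always declared and their fma bodies need only
// CUTE_ARCH_MMA_SM90A_ENABLED.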
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p, %75, %76, %77, %78;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p, %78, %79, %80;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p, %83, %84, %85, %86;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p, %86, %87, %88;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p, %91, %92, %93, %94;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p, %94, %95, %96;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p, %99, %100, %101, %102;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p, %102, %103, %104;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
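// From N = 208 upward the per-thread accumulator fragment exceeds one hundred registers
// (CRegisters = float[104], float[112], ...), so the operand names below switch to three-digit
// indices (d000..d103 and a000..a003) while the structure of the wrappers stays identical.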
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p, %107, %108, %109, %110;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p, %110, %111, %112;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p, %115, %116, %117, %118;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
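// Notes on the RS ("register x shared") wrappers: operand A is supplied as four packed
// 32-bit registers per thread instead of a shared-memory descriptor, while B still uses
// desc_b. PTX requires a register-sourced A operand to be K-major, which the static_assert
// below enforces and which is why tnspA is not forwarded to the instruction.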
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p, %118, %119, %120;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p, %123, %124, %125, %126;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p, %126, %127, %128;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
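// The N=256 shape below belongs to the baseline shape set, so unlike the N=224 and N=240
// shapes above it is not guarded by CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED.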
// GMMA 64x256x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x16_F32BF16BF16_SS
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p, %131, %132, %133, %134;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspA)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x16_F32BF16BF16_SS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
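// Illustrative use (a minimal sketch, not part of this header): these structs are normally
// consumed through CuTe's MMA atom machinery rather than called directly. The snippet
// assumes the usual CuTe headers are included and that the A/B/C tiles have already been
// partitioned; tiled_mma, tCrA, tCrB and tCrC are placeholder names.
//
//   using MMA_Op = SM90_64x256x16_F32BF16BF16_SS<GMMA::Major::K, GMMA::Major::K>;
//   auto tiled_mma = cute::make_tiled_mma(MMA_Op{});   // wrap the atom defined above in a TiledMMA
//   cute::gemm(tiled_mma, tCrA, tCrB, tCrC);           // issues the wgmma per k-block
//
// Calling fma() directly is also possible (two descriptors plus the full accumulator
// fragment), but that is usually left to the generated MMA traits.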
// GMMA 64x256x16 F32+=BF16*BF16
template <
GMMA::Major tnspA,
GMMA::Major tnspB,
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x16_F32BF16BF16_RS
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
static_assert(tnspA == GMMA::Major::K,
"Register source operand A must have K major layout.");
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k16.f32.bf16.bf16 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p, %134, %135, %136;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)), "n"(int32_t(tnspB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x16_F32BF16BF16_RS without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
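// The remaining wrappers in this section cover TF32 inputs: m64nNk8 shapes with F32
// accumulation. The _TN suffix means both operands are K-major, so the GMMA::Major template
// parameters are dropped; RS variants carry the per-thread A fragment as four 32-bit TF32
// values, and the per-thread accumulator count is again N/2 floats (4 floats for N=8).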
// GMMA 64x8x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k8.f32.tf32.tf32 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p, %7, %8;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k8.f32.tf32.tf32 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p, %10, %11;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p, %11, %12;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p, %14, %15;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p, %19, %20;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p, %22, %23;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p, %27, %28;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p, %30, %31;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p, %35, %36;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p, %38, %39;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p, %43, %44;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p, %46, %47;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p, %51, %52;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p, %54, %55;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p, %59, %60;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p, %62, %63;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p, %67, %68;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
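// Illustrative usage sketch for the atom above (hedged: MMA_Atom, make_tiled_mma
// and gemm live in the cute/atom layer, not in this header, and tCrA/tCrB/tCrC
// are placeholder fragment names):
//
//   using namespace cute;
//   auto tiled_mma = make_tiled_mma(SM90_64x128x8_F32TF32TF32_SS_TN<>{});
//   // Inside the mainloop, gemm(tiled_mma, tCrA, tCrB, tCrC) lowers to the
//   // fma() above, with tCrA/tCrB carrying GMMA shared-memory descriptors for
//   // this SS form and tCrC holding the 64 per-thread float accumulators.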
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p, %70, %71;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
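// DRegisters is void for these wgmma atoms because accumulation is in place:
// the "+f" constraints mark every accumulator register as read-modify-write,
// and the atom/traits layer (outside this file) treats the C fragment as the
// destination as well.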
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p, %75, %76;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p, %78, %79;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p, %83, %84;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p, %86, %87;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p, %91, %92;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p, %94, %95;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p, %99, %100;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
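// Operand numbering for the 64x192 SS atom above, spelled out once as a worked
// example (the same arithmetic applies to every wgmma block in this file):
//   96 accumulators -> "%0".."%95"   ("+f" read/write constraints)
//   desc_a, desc_b  -> "%96", "%97"  ("l" 64-bit descriptor inputs)
//   scale_D         -> "%98", tested by setp.ne.b32 to form predicate p
//   scaleA, scaleB  -> "%99", "%100" ("n" immediate inputs)
// The RS variant below replaces desc_a with four "r" register operands, which
// shifts the tail: its scale_D is tested at "%101".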
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p, %102, %103;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p, %107, %108;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p, %110, %111;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p, %115, %116;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p, %118, %119;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p, %123, %124;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p, %126, %127;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x8_F32TF32TF32_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p, %131, %132;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x8_F32TF32TF32_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x8 TN F32+=TF32*TF32
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x8_F32TF32TF32_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k8.f32.tf32.tf32 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p, %134, %135;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x8_F32TF32TF32_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
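// Summary of the TF32 atom family above (added for readability; see the PTX
// wgmma documentation for authoritative semantics): the *_SS_TN variants read
// both A and B through 64-bit shared-memory matrix descriptors (desc_a,
// desc_b), while the *_RS_TN variants take the A fragment as four 32-bit
// registers (a000..a003) and only B through a descriptor. The scaleA/scaleB
// template parameters are emitted as the PTX immediate input-scale operands
// and select whether the corresponding input is used as-is or negated
// (GMMA::ScaleIn::One / GMMA::ScaleIn::Neg). The runtime scale_D argument
// drives the predicate p: GMMA::ScaleOut::One accumulates into D, while
// GMMA::ScaleOut::Zero overwrites it.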
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN S32+=S8*S8
struct SM90_64x8x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.s8 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
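// Usage sketch for the descriptor-based integer atoms (illustrative only; the
// names smem_desc_a, smem_desc_b, and acc are hypothetical, and real kernels
// normally reach these atoms through CuTe's MMA_Atom/TiledMMA machinery with
// the appropriate warpgroup fence/commit/wait synchronization rather than
// calling fma() directly):
//
//   uint64_t smem_desc_a = ...;  // GMMA descriptor for the A tile in shared memory
//   uint64_t smem_desc_b = ...;  // GMMA descriptor for the B tile in shared memory
//   uint32_t acc[4] = {};        // this thread's 4 of the 64x8 S32 accumulators
//   // First k-block: overwrite the accumulator (predicate p evaluates false).
//   SM90_64x8x32_S32S8S8_SS_TN::fma(smem_desc_a, smem_desc_b,
//                                   acc[0], acc[1], acc[2], acc[3],
//                                   GMMA::ScaleOut::Zero);
//   // Remaining k-blocks: accumulate (GMMA::ScaleOut::One is the default).
//   SM90_64x8x32_S32S8S8_SS_TN::fma(smem_desc_a, smem_desc_b,
//                                   acc[0], acc[1], acc[2], acc[3]);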
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN S32+=S8*S8
struct SM90_64x8x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
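// The *_SATURATE variants differ from their counterparts above only in the
// .satfinite qualifier on the wgmma instruction, which (per the PTX ISA)
// clamps the S32 result to the representable range of the destination rather
// than letting it wrap on overflow; the register interface and descriptor
// operands are otherwise identical.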
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=S8*S8
struct SM90_64x16x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=S8*S8
struct SM90_64x16x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=S8*S8
struct SM90_64x32x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=S8*S8
struct SM90_64x32x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=S8*S8
struct SM90_64x48x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=S8*S8
struct SM90_64x48x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=S8*S8
struct SM90_64x64x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=S8*S8
struct SM90_64x64x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=S8*S8
struct SM90_64x80x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=S8*S8
struct SM90_64x80x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=S8*S8
struct SM90_64x96x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=S8*S8
struct SM90_64x96x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=S8*S8
struct SM90_64x112x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=S8*S8
struct SM90_64x112x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=S8*S8
struct SM90_64x128x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=S8*S8
struct SM90_64x128x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=S8*S8
struct SM90_64x144x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=S8*S8
struct SM90_64x144x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=S8*S8
struct SM90_64x160x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=S8*S8
struct SM90_64x160x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=S8*S8
struct SM90_64x176x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=S8*S8
struct SM90_64x176x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
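// Note: unlike the neighboring N-dimension shapes in this file, the 64x192x32 atoms below (and the
// 64x256x32 atoms further down) are not wrapped in CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED, so they
// are always declared; only the asm path remains guarded by CUTE_ARCH_MMA_SM90A_ENABLED.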
// GMMA 64x192x32 TN S32+=S8*S8
struct SM90_64x192x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN S32+=S8*S8
struct SM90_64x192x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=S8*S8
struct SM90_64x208x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=S8*S8
struct SM90_64x208x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=S8*S8
struct SM90_64x224x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=S8*S8
struct SM90_64x224x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=S8*S8
struct SM90_64x240x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=S8*S8
struct SM90_64x240x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=S8*S8
struct SM90_64x256x32_S32S8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=S8*S8
struct SM90_64x256x32_S32S8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN S32+=S8*S8
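// RS variants: the A operand is sourced from registers (ARegisters = uint32_t[4]) while B is still
// read from shared memory through a 64-bit matrix descriptor; the SS variants above pass both
// operands as descriptors.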
struct SM90_64x8x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.s8 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
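// Usage note (a sketch, not the only supported path): these atoms are normally consumed through the
// cute::MMA_Atom / TiledMMA machinery rather than called directly. A raw call mirrors the signature
// above, e.g.
//   SM90_64x8x32_S32S8S8_RS_TN::fma(a0, a1, a2, a3, desc_b, d0, d1, d2, d3, GMMA::ScaleOut::One);
// and, like any wgmma.mma_async, is assumed to be bracketed by the warpgroup arrive/commit/wait
// synchronization helpers provided alongside these atoms.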
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN S32+=S8*S8
struct SM90_64x8x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=S8*S8
struct SM90_64x16x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=S8*S8
struct SM90_64x16x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=S8*S8
struct SM90_64x32x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=S8*S8
struct SM90_64x32x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
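// Irregular N extents (48, 80, 112, 144, 160, 176, ...) are emitted only when
// CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED is defined; the remaining shapes compile unconditionally.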
// GMMA 64x48x32 TN S32+=S8*S8
struct SM90_64x48x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=S8*S8
struct SM90_64x48x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=S8*S8
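// Note on scale_D (common to every atom in this family): it is lowered to the predicate p via
// setp.ne.b32, so ScaleOut::One (nonzero) accumulates D += A*B while ScaleOut::Zero overwrites
// with D = A*B.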
struct SM90_64x64x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=S8*S8
struct SM90_64x64x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=S8*S8
struct SM90_64x80x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=S8*S8
struct SM90_64x80x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=S8*S8
struct SM90_64x96x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=S8*S8
struct SM90_64x96x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=S8*S8
struct SM90_64x112x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=S8*S8
struct SM90_64x112x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=S8*S8
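// Operand-index layout for this 64-accumulator atom: %0-%63 are the in/out accumulators,
// %64-%67 the A register fragment, %68 the B descriptor, and %69 feeds the scale_D predicate.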
struct SM90_64x128x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=S8*S8
struct SM90_64x128x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=S8*S8
struct SM90_64x144x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=S8*S8
struct SM90_64x144x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=S8*S8
struct SM90_64x160x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=S8*S8
struct SM90_64x160x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=S8*S8
struct SM90_64x176x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=S8*S8
struct SM90_64x176x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
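// Note (explanatory, added for clarity; semantics summarized from the PTX wgmma documentation):
// each wrapper below lowers its GMMA::ScaleOut argument to the predicate `p` via `setp.ne.b32`,
// so ScaleOut::One accumulates into the existing D fragment (D = A*B + D) while ScaleOut::Zero
// overwrites it (D = A*B). The *_SATURATE variants additionally carry the `.satfinite` qualifier,
// which clamps the s32 accumulation to [INT32_MIN, INT32_MAX] instead of wrapping on overflow.
////////////////////////////////////////////////////////////////////////////////////////////////////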
// GMMA 64x192x32 TN S32+=S8*S8
struct SM90_64x192x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN S32+=S8*S8
struct SM90_64x192x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=S8*S8
struct SM90_64x208x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=S8*S8
struct SM90_64x208x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=S8*S8
struct SM90_64x224x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=S8*S8
struct SM90_64x224x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=S8*S8
struct SM90_64x240x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=S8*S8
struct SM90_64x240x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=S8*S8
struct SM90_64x256x32_S32S8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=S8*S8
struct SM90_64x256x32_S32S8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
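// Usage sketch (illustrative only, not part of the instruction wrappers themselves): these structs
// are normally consumed through CuTe's MMA atom machinery rather than by calling fma() directly.
// The shape below is an arbitrary choice for the sketch; any wrapper defined in this file can be
// substituted, and the fragment names (tCrA, tCrB, tCrC) are placeholders.
//
//   using namespace cute;
//   // Build a warpgroup-wide TiledMMA from one of the wgmma wrappers above.
//   auto tiled_mma = make_tiled_mma(SM90_64x256x32_S32S8S8_RS_TN{});
//   // Inside a kernel, partition A/B/C with the TiledMMA and issue the mainloop,
//   // e.g. via cute::gemm(tiled_mma, tCrA, tCrB, tCrC), fencing and committing the
//   // asynchronous wgmma group around it (warpgroup_fence_operand / warpgroup_commit_batch).
////////////////////////////////////////////////////////////////////////////////////////////////////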
// GMMA 64x8x32 TN S32+=S8*U8
struct SM90_64x8x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.u8 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
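// Note (explanatory, added for clarity): *_SS_TN wrappers such as the one above source both A and
// B from shared memory via 64-bit matrix descriptors (desc_a, desc_b), so ARegisters collapses to
// uint64_t[1] and both operands are bound with "l" constraints. The *_RS_TN wrappers earlier in
// this file instead keep the A fragment in registers (ARegisters = uint32_t[N], "r" constraints)
// and take only B through a descriptor.
////////////////////////////////////////////////////////////////////////////////////////////////////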
// GMMA 64x8x32 TN S32+=S8*U8
struct SM90_64x8x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=S8*U8
struct SM90_64x16x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=S8*U8
struct SM90_64x16x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=S8*U8
struct SM90_64x32x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=S8*U8
struct SM90_64x32x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=S8*U8
struct SM90_64x48x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=S8*U8
struct SM90_64x48x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
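// Each *_SATURATE atom differs from its base atom only in the ".satfinite" qualifier on
// the wgmma instruction: on overflow the s32 accumulation is clamped to
// [INT32_MIN, INT32_MAX] instead of wrapping. Register layout, descriptor operands, and
// the scale_D predicate are otherwise identical.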
// GMMA 64x64x32 TN S32+=S8*U8
struct SM90_64x64x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=S8*U8
struct SM90_64x64x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
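// Shapes wrapped in CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED (N = 48, 80, 112, 144, 160,
// 176, 208, ...) are opt-in and compiled only when that macro is defined, so the less
// common tile widths do not add to compile time unless requested. The unguarded shapes
// in this section (N = 16, 32, 64, 96, 128, 192) are always available.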
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=S8*U8
struct SM90_64x80x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=S8*U8
struct SM90_64x80x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=S8*U8
struct SM90_64x96x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=S8*U8
struct SM90_64x96x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=S8*U8
struct SM90_64x112x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=S8*U8
struct SM90_64x112x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=S8*U8
struct SM90_64x128x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=S8*U8
struct SM90_64x128x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=S8*U8
struct SM90_64x144x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=S8*U8
struct SM90_64x144x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=S8*U8
struct SM90_64x160x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=S8*U8
struct SM90_64x160x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=S8*U8
struct SM90_64x176x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=S8*U8
struct SM90_64x176x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
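// Accumulator register pressure grows linearly with N: each thread of the warpgroup
// holds N/2 s32 accumulator registers, so the 64x192 and 64x208 atoms below occupy 96
// and 104 of the at most 255 registers available per thread before any other kernel
// state is counted.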
// GMMA 64x192x32 TN S32+=S8*U8
struct SM90_64x192x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN S32+=S8*U8
struct SM90_64x192x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=S8*U8
struct SM90_64x208x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=S8*U8
struct SM90_64x208x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=S8*U8
struct SM90_64x224x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=S8*U8
struct SM90_64x224x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=S8*U8
struct SM90_64x240x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=S8*U8
struct SM90_64x240x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
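// Per-thread accumulator footprint: wgmma distributes the M x N accumulator tile across
// the 128 threads of a warpgroup, so with 32-bit (s32) accumulators each thread holds
// M*N/128 values, which is exactly the CRegisters length of each atom (64*240/128 = 120
// for the shape above, 64*256/128 = 128 for the shape below). The widest shapes therefore
// claim a large share of the 255-register-per-thread budget before any other registers
// are counted.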
// GMMA 64x256x32 TN S32+=S8*U8
struct SM90_64x256x32_S32S8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=S8*U8
struct SM90_64x256x32_S32S8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
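// The atoms above are "SS" variants: both A and B arrive as 64-bit shared-memory matrix
// descriptors. The atoms below are "RS" variants: the A fragment is read from four 32-bit
// registers per thread while B still comes from a descriptor. A minimal, illustrative
// direct call of the smallest RS atom is sketched here (hypothetical values; real code
// reaches these atoms through cute::MMA_Atom and must also issue the surrounding
// warpgroup fence/commit/wait synchronization, which this sketch omits):
//
//   uint32_t a0, a1, a2, a3;   // A fragment, assumed already loaded into registers
//   uint64_t desc_b;           // B shared-memory descriptor, assumed already constructed
//   uint32_t d0 = 0, d1 = 0, d2 = 0, d3 = 0;
//   SM90_64x8x32_S32S8U8_RS_TN::fma(a0, a1, a2, a3, desc_b,
//                                   d0, d1, d2, d3,
//                                   GMMA::ScaleOut::Zero);  // Zero: overwrite, do not accumulate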
// GMMA 64x8x32 TN S32+=S8*U8
struct SM90_64x8x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.u8 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
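// The *_SATURATE atoms, such as the one that follows, differ from their base variants only
// in the ".satfinite" qualifier, which (per the PTX wgmma description) clamps the s32
// results to the representable range rather than letting them wrap on overflow; operand
// layout, register counts, and scale_D handling are otherwise identical.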
// GMMA 64x8x32 TN S32+=S8*U8
struct SM90_64x8x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=S8*U8
struct SM90_64x16x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=S8*U8
struct SM90_64x16x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=S8*U8
struct SM90_64x32x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=S8*U8
struct SM90_64x32x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
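// Shapes with a non-default N dimension (48 and 80 below, 208/224/240 above) are compiled
// only when CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED is defined; the remaining shapes
// (8, 16, 32, 64, 96, 192, 256) are always available, so opting in merely widens the set
// of tile sizes without changing any individual atom.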
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=S8*U8
struct SM90_64x48x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=S8*U8
struct SM90_64x48x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=S8*U8
struct SM90_64x64x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=S8*U8
struct SM90_64x64x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=S8*U8
struct SM90_64x80x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=S8*U8
struct SM90_64x80x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=S8*U8
struct SM90_64x96x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=S8*U8 with saturation (.satfinite)
struct SM90_64x96x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=S8*U8
struct SM90_64x112x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=S8*U8 with saturation (.satfinite)
struct SM90_64x112x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=S8*U8
struct SM90_64x128x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
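// Usage sketch for SM90_64x128x32_S32S8U8_RS_TN (illustrative only; names
// such as tiled_mma, tCrA, tCrB and tCrC below are placeholders): these RS
// wrappers are normally driven through CuTe's MMA atom machinery rather than
// by calling fma() directly, since fma() takes every per-thread accumulator
// register as a separate argument. Assuming the matching MMA_Traits
// specialization is available, that looks roughly like:
//
//   auto tiled_mma = cute::make_tiled_mma(
//       cute::MMA_Atom<cute::SM90_64x128x32_S32S8U8_RS_TN>{});
//   // Partition the register-resident A tile, the shared-memory B tile
//   // (consumed via its GMMA descriptor), and the s32 accumulators with
//   // tiled_mma, then issue
//   //   cute::gemm(tiled_mma, tCrA, tCrB, tCrC);
//   // bracketed by the warpgroup_arrive() / warpgroup_commit_batch() /
//   // warpgroup_wait<0>() helpers provided alongside these wrappers.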
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=S8*U8 with saturation (.satfinite)
struct SM90_64x128x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=S8*U8
struct SM90_64x144x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=S8*U8 with saturation (.satfinite)
struct SM90_64x144x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=S8*U8
struct SM90_64x160x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=S8*U8 with saturation (.satfinite)
struct SM90_64x160x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=S8*U8
struct SM90_64x176x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=S8*U8 with saturation (.satfinite)
struct SM90_64x176x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN S32+=S8*U8
struct SM90_64x192x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN S32+=S8*U8 with saturation (.satfinite)
struct SM90_64x192x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=S8*U8
struct SM90_64x208x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=S8*U8 with saturation (.satfinite)
struct SM90_64x208x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=S8*U8
struct SM90_64x224x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=S8*U8
struct SM90_64x224x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=S8*U8
struct SM90_64x240x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=S8*U8
struct SM90_64x240x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=S8*U8
struct SM90_64x256x32_S32S8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=S8*U8
struct SM90_64x256x32_S32S8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.s8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32S8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
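////////////////////////////////////////////////////////////////////////////////////////////////////
// Note on the atoms in this family: the *_RS_TN structs above source the A fragment from
// registers (ARegisters = uint32_t[4]) and reference B through a shared-memory matrix
// descriptor, whereas the *_SS_TN structs below pass both A and B as descriptors
// (ARegisters = uint64_t[1]). The _SATURATE variants differ only in the ".satfinite"
// qualifier on the PTX instruction, which (per the PTX ISA) clamps the s32 accumulation
// into the representable range instead of wrapping on overflow.
//
// A minimal, hedged sketch of selecting a variant at compile time; the alias name below is
// illustrative only and is not part of this header:
//
//   template <bool Saturate>
//   using GmmaS8U8_RS_64x256x32 =
//       std::conditional_t<Saturate,
//                          SM90_64x256x32_S32S8U8_RS_TN_SATURATE,
//                          SM90_64x256x32_S32S8U8_RS_TN>;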
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN S32+=U8*S8
struct SM90_64x8x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.s8 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
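////////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the generated atom list): calling the 64x8x32 SS atom
// above directly. Both operands are referenced through GMMA shared-memory descriptors and
// only the four s32 accumulators live in registers. The names desc_a, desc_b, and d0..d3
// are hypothetical; wgmma fencing / commit / wait are assumed to be handled by the caller,
// as the surrounding CuTe machinery normally does.
//
//   uint32_t d0 = 0, d1 = 0, d2 = 0, d3 = 0;
//   SM90_64x8x32_S32U8S8_SS_TN::fma(desc_a, desc_b, d0, d1, d2, d3,
//                                   GMMA::ScaleOut::Zero);  // D  = A*B   (accumulator ignored)
//   SM90_64x8x32_S32U8S8_SS_TN::fma(desc_a, desc_b, d0, d1, d2, d3,
//                                   GMMA::ScaleOut::One);   // D += A*B   (accumulate)
//
// In typical use these structs are not invoked directly but wrapped in cute::MMA_Atom and
// composed into a TiledMMA via cute::make_tiled_mma.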
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN S32+=U8*S8
struct SM90_64x8x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=U8*S8
struct SM90_64x16x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=U8*S8
struct SM90_64x16x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=U8*S8
struct SM90_64x32x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=U8*S8
struct SM90_64x32x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=U8*S8
struct SM90_64x48x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=U8*S8
struct SM90_64x48x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=U8*S8
struct SM90_64x64x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=U8*S8
struct SM90_64x64x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
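////////////////////////////////////////////////////////////////////////////////////////////////////
// Accumulator sizing across this family: for an m64nNk32 s32 GMMA, the 64xN accumulator
// tile is distributed over the 128 threads of the warpgroup, so each thread holds
// (64*N)/128 = N/2 32-bit accumulators. That is why CRegisters grows from uint32_t[4]
// (N=8) up to uint32_t[128] (N=256) in the structs in this file. A hedged compile-time
// sanity check (the helper name is illustrative only, not part of this header):
//
//   constexpr int gmma_s32_acc_regs_per_thread(int n) { return (64 * n) / 128; }
//   static_assert(gmma_s32_acc_regs_per_thread(64)  == 32,  "matches SM90_64x64x32_*");
//   static_assert(gmma_s32_acc_regs_per_thread(256) == 128, "matches SM90_64x256x32_*");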
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=U8*S8
struct SM90_64x80x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=U8*S8
struct SM90_64x80x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=U8*S8
struct SM90_64x96x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=U8*S8
struct SM90_64x96x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=U8*S8
struct SM90_64x112x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=U8*S8
struct SM90_64x112x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=U8*S8
struct SM90_64x128x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
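// Usage note (illustrative sketch, not part of the generated op set): each
// SM90_64xNx32_S32U8S8_SS_TN op issues one warpgroup-wide wgmma.mma_async
// instruction. Both operands are read from shared memory through the 64-bit
// matrix descriptors desc_a/desc_b, and the s32 accumulator stays in registers:
// 64*N values distributed over the 128 threads of the warpgroup, i.e. N/2
// uint32_t values per thread (hence CRegisters = uint32_t[64] for N = 128).
// The scale_D argument feeds the predicate operand of the instruction;
// GMMA::ScaleOut::One accumulates into D, while GMMA::ScaleOut::Zero ignores
// the prior accumulator contents. A direct invocation, assuming desc_a/desc_b
// and the per-thread fragment d are prepared elsewhere (these ops are normally
// consumed through CuTe's MMA_Atom/TiledMMA machinery instead), would look like:
//
//   uint32_t d[64] = {};  // per-thread accumulator fragment
//   SM90_64x128x32_S32U8S8_SS_TN::fma(
//       desc_a, desc_b,
//       d[ 0], d[ 1], d[ 2], d[ 3], d[ 4], d[ 5], d[ 6], d[ 7],
//       d[ 8], d[ 9], d[10], d[11], d[12], d[13], d[14], d[15],
//       d[16], d[17], d[18], d[19], d[20], d[21], d[22], d[23],
//       d[24], d[25], d[26], d[27], d[28], d[29], d[30], d[31],
//       d[32], d[33], d[34], d[35], d[36], d[37], d[38], d[39],
//       d[40], d[41], d[42], d[43], d[44], d[45], d[46], d[47],
//       d[48], d[49], d[50], d[51], d[52], d[53], d[54], d[55],
//       d[56], d[57], d[58], d[59], d[60], d[61], d[62], d[63],
//       GMMA::ScaleOut::One);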
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=U8*S8
struct SM90_64x128x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=U8*S8
struct SM90_64x144x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=U8*S8
struct SM90_64x144x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=U8*S8
struct SM90_64x160x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=U8*S8
struct SM90_64x160x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=U8*S8
struct SM90_64x176x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=U8*S8
struct SM90_64x176x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN S32+=U8*S8
struct SM90_64x192x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN S32+=U8*S8
struct SM90_64x192x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
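// Note: within this S32+=U8*S8 family, the 64x128 and 64x192 shapes are emitted
// unconditionally, while the remaining N widths (112, 144, 160, 176, 208, 224, 240)
// are additionally guarded by CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED, as in the
// #if blocks below.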
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=U8*S8
struct SM90_64x208x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=U8*S8
struct SM90_64x208x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=U8*S8
struct SM90_64x224x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=U8*S8
struct SM90_64x224x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=U8*S8
struct SM90_64x240x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=U8*S8 (saturate)
struct SM90_64x240x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=U8*S8
struct SM90_64x256x32_S32U8S8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8S8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=U8*S8 (saturate)
struct SM90_64x256x32_S32U8S8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8S8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
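// Note: the _RS_TN ops that follow source the A operand from registers (ARegisters = uint32_t[4])
// rather than from a shared-memory descriptor; the B operand is still supplied via desc_b, exactly
// as in the _SS_TN ops above.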
// GMMA 64x8x32 TN S32+=U8*S8
struct SM90_64x8x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.s8 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
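// Illustrative sketch (assumption-laden, not a canonical calling sequence): in CuTe these ops are
// normally driven through MMA_Atom / TiledMMA via their MMA_Traits rather than called directly.
// If issued by hand, the wgmma must be bracketed by the warpgroup synchronization helpers
// (cute::warpgroup_arrive / warpgroup_commit_batch / warpgroup_wait). Here a0..a3 are assumed to
// hold this thread's A fragment, desc_b an already-built GMMA descriptor for B in shared memory,
// and acc[] is the per-thread accumulator fragment:
//
//   uint32_t acc[4] = {0u, 0u, 0u, 0u};                     // CRegisters fragment
//   cute::warpgroup_arrive();                               // fence prior register/smem writes
//   SM90_64x8x32_S32U8S8_RS_TN::fma(a0, a1, a2, a3,
//                                   desc_b,
//                                   acc[0], acc[1], acc[2], acc[3],
//                                   GMMA::ScaleOut::Zero);  // first MMA: ignore old accumulator
//   cute::warpgroup_commit_batch();                         // commit the async wgmma group
//   cute::warpgroup_wait<0>();                              // wait for completion before reading acc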
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN S32+=U8*S8 (saturate)
struct SM90_64x8x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=U8*S8
struct SM90_64x16x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=U8*S8 (saturate)
struct SM90_64x16x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=U8*S8
struct SM90_64x32x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=U8*S8 (saturate)
struct SM90_64x32x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=U8*S8
struct SM90_64x48x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=U8*S8 (saturate)
struct SM90_64x48x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=U8*S8
struct SM90_64x64x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=U8*S8 (saturate)
struct SM90_64x64x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=U8*S8
struct SM90_64x80x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=U8*S8 (saturate)
struct SM90_64x80x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=U8*S8
struct SM90_64x96x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=U8*S8 (saturate)
struct SM90_64x96x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=U8*S8
struct SM90_64x112x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=U8*S8 (saturate)
struct SM90_64x112x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=U8*S8
struct SM90_64x128x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=U8*S8 (saturate)
struct SM90_64x128x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=U8*S8
struct SM90_64x144x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=U8*S8 (saturate)
struct SM90_64x144x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=U8*S8
struct SM90_64x160x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=U8*S8
struct SM90_64x160x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=U8*S8
struct SM90_64x176x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=U8*S8
struct SM90_64x176x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
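// The 64x192 (immediately below) and 64x256 shapes are emitted unconditionally; the other N
// values in this family are only available when CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED is defined.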
// GMMA 64x192x32 TN S32+=U8*S8
struct SM90_64x192x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN S32+=U8*S8
struct SM90_64x192x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
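// Illustrative sketch only (not part of the generated atom definitions): these operations are
// normally consumed through CuTe's MMA_Atom/TiledMMA layer rather than by calling fma() by hand.
// The CuTe names below (MMA_Atom, make_tiled_mma, get_thread_slice, partition_fragment_C,
// cute::gemm) are the existing CuTe API; the tensor gC and the surrounding kernel are assumed.
//
//   using Atom     = cute::MMA_Atom<cute::SM90_64x192x32_S32U8S8_RS_TN>;
//   auto tiled_mma = cute::make_tiled_mma(Atom{});               // one warpgroup per 64x192x32 tile
//   auto thr_mma   = tiled_mma.get_thread_slice(threadIdx.x);
//   auto tCrC      = thr_mma.partition_fragment_C(gC);           // 96 uint32_t accumulators/thread
//                                                                 // for a single 64x192 C tile
//   // cute::gemm(tiled_mma, tCrA, tCrB, tCrC) then lowers to the wgmma.mma_async call above,
//   // with A held in registers and B referenced via its shared-memory descriptor.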
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=U8*S8
struct SM90_64x208x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=U8*S8
struct SM90_64x208x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=U8*S8
struct SM90_64x224x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=U8*S8
struct SM90_64x224x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=U8*S8
struct SM90_64x240x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=U8*S8
struct SM90_64x240x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=U8*S8
struct SM90_64x256x32_S32U8S8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.s8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8S8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=U8*S8
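// The _SATURATE variants issue the same shape with the .satfinite qualifier,
// which clamps the s32 result to the representable int32 range instead of
// letting it wrap on overflow.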
struct SM90_64x256x32_S32U8S8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.s8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8S8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN S32+=U8*U8
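// The _SS_TN variants take both operands from shared memory: desc_a and
// desc_b are 64-bit GMMA matrix descriptors, and only the s32 accumulators
// live in registers. A minimal usage sketch (not part of this header; it
// assumes the public CuTe 3.x MMA_Atom / make_tiled_mma API rather than
// calling fma() directly):
//
//   using Atom = cute::MMA_Atom<cute::SM90_64x8x32_S32U8U8_SS_TN>;
//   auto tiled_mma = cute::make_tiled_mma(Atom{});
//   // partition A/B/C with tiled_mma and invoke cute::gemm(tiled_mma, ...)
//   // inside a warpgroup, with A and B staged in shared memory.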
struct SM90_64x8x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.u8 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN S32+=U8*U8
struct SM90_64x8x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=U8*U8
struct SM90_64x16x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=U8*U8
struct SM90_64x16x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=U8*U8
struct SM90_64x32x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=U8*U8
struct SM90_64x32x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
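// The shapes in the regions below that are wrapped in
// CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED (N = 48, 80, 112, 144, 160, 176, ...)
// are only compiled when the user opts in by defining that macro; the
// unguarded shapes form the default set.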
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=U8*U8
struct SM90_64x48x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=U8*U8
struct SM90_64x48x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=U8*U8
struct SM90_64x64x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=U8*U8
struct SM90_64x64x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=U8*U8
struct SM90_64x80x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=U8*U8
struct SM90_64x80x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=U8*U8
struct SM90_64x96x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=U8*U8
struct SM90_64x96x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=U8*U8
struct SM90_64x112x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=U8*U8
struct SM90_64x112x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=U8*U8
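// Because every accumulator is named explicitly, the asm operand indices
// shift with N: for this 64x128 shape the 64 accumulators are %0-%63, desc_a
// is %64, desc_b is %65, and scale_D is %66 (hence "setp.ne.b32 p, %66, 0").
// Each shape therefore carries its own hand-written asm string.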
struct SM90_64x128x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=U8*U8
struct SM90_64x128x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=U8*U8
struct SM90_64x144x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=U8*U8
struct SM90_64x144x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=U8*U8
struct SM90_64x160x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=U8*U8
struct SM90_64x160x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=U8*U8
struct SM90_64x176x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=U8*U8
struct SM90_64x176x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN S32+=U8*U8
struct SM90_64x192x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN S32+=U8*U8
struct SM90_64x192x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=U8*U8
struct SM90_64x208x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=U8*U8
struct SM90_64x208x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=U8*U8
struct SM90_64x224x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=U8*U8
struct SM90_64x224x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=U8*U8
struct SM90_64x240x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=U8*U8
struct SM90_64x240x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=U8*U8
struct SM90_64x256x32_S32U8U8_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8U8_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=U8*U8
struct SM90_64x256x32_S32U8U8_SS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8U8_SS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
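//
// The *_RS_TN variants that follow differ from the *_SS_TN variants above only in how the A
// operand is sourced: A comes from four 32-bit registers per thread (ARegisters = uint32_t[4])
// instead of a shared-memory descriptor, while B is still passed as a 64-bit descriptor.
//
// Illustrative sketch only (not part of this header): these ops are normally consumed through
// CuTe's tiled-MMA machinery rather than by calling fma() directly, e.g.
//
//   using namespace cute;                                          // hypothetical usage
//   auto tiled_mma = make_tiled_mma(SM90_64x8x32_S32U8U8_RS_TN{}); // build a TiledMMA from the atom
//
// Calling fma() by hand additionally requires the surrounding warpgroup fence/commit/wait
// sequencing described for wgmma.mma_async in the PTX ISA.
//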
// GMMA 64x8x32 TN S32+=U8*U8
struct SM90_64x8x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.u8 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN S32+=U8*U8
struct SM90_64x8x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=U8*U8
struct SM90_64x16x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN S32+=U8*U8
struct SM90_64x16x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=U8*U8
struct SM90_64x32x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN S32+=U8*U8
struct SM90_64x32x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=U8*U8
struct SM90_64x48x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN S32+=U8*U8
struct SM90_64x48x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=U8*U8
struct SM90_64x64x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN S32+=U8*U8
struct SM90_64x64x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
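////////////////////////////////////////////////////////////////////////////////////////////////////
// Note on scale_D and the "_SATURATE" variants (informational comment only): scale_D is lowered to
// the trailing predicate operand of wgmma.mma_async via "setp.ne.b32 p, %N, 0". With
// GMMA::ScaleOut::One the predicate is true and the instruction accumulates D = A*B + D; with
// GMMA::ScaleOut::Zero the accumulator input is ignored and D = A*B. The _SATURATE ops differ only
// in the ".satfinite" qualifier, which clamps the S32 result to the representable range on
// overflow rather than letting it wrap.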
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=U8*U8
struct SM90_64x80x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN S32+=U8*U8
struct SM90_64x80x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=U8*U8
struct SM90_64x96x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN S32+=U8*U8
struct SM90_64x96x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=U8*U8
struct SM90_64x112x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN S32+=U8*U8
struct SM90_64x112x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=U8*U8
struct SM90_64x128x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN S32+=U8*U8
struct SM90_64x128x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
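////////////////////////////////////////////////////////////////////////////////////////////////////
// Note on the register counts used by these ops (a sketch of the arithmetic, for readers of the
// generated code): each m64nNk32 S32 op produces a 64 x N tile of 32-bit accumulators spread across
// the 128 threads of the warpgroup, i.e. 64*N/128 = N/2 accumulator registers per thread, which is
// why CRegisters is uint32_t[64] for N = 128, uint32_t[8] for N = 16, and so on. The RS variants
// additionally keep the A fragment in registers: 64*32 u8 values / 128 threads = 16 bytes per
// thread = 4 x uint32_t, matching ARegisters = uint32_t[4] throughout.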
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=U8*U8
struct SM90_64x144x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN S32+=U8*U8
struct SM90_64x144x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[72];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=U8*U8
struct SM90_64x160x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN S32+=U8*U8
struct SM90_64x160x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[80];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=U8*U8
struct SM90_64x176x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN S32+=U8*U8
struct SM90_64x176x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[88];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN S32+=U8*U8
struct SM90_64x192x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN S32+=U8*U8
struct SM90_64x192x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[96];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
uint32_t & d64, uint32_t & d65, uint32_t & d66, uint32_t & d67,
uint32_t & d68, uint32_t & d69, uint32_t & d70, uint32_t & d71,
uint32_t & d72, uint32_t & d73, uint32_t & d74, uint32_t & d75,
uint32_t & d76, uint32_t & d77, uint32_t & d78, uint32_t & d79,
uint32_t & d80, uint32_t & d81, uint32_t & d82, uint32_t & d83,
uint32_t & d84, uint32_t & d85, uint32_t & d86, uint32_t & d87,
uint32_t & d88, uint32_t & d89, uint32_t & d90, uint32_t & d91,
uint32_t & d92, uint32_t & d93, uint32_t & d94, uint32_t & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63),
"+r"(d64), "+r"(d65), "+r"(d66), "+r"(d67),
"+r"(d68), "+r"(d69), "+r"(d70), "+r"(d71),
"+r"(d72), "+r"(d73), "+r"(d74), "+r"(d75),
"+r"(d76), "+r"(d77), "+r"(d78), "+r"(d79),
"+r"(d80), "+r"(d81), "+r"(d82), "+r"(d83),
"+r"(d84), "+r"(d85), "+r"(d86), "+r"(d87),
"+r"(d88), "+r"(d89), "+r"(d90), "+r"(d91),
"+r"(d92), "+r"(d93), "+r"(d94), "+r"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
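// The 64xNx32 shapes with N = 208, 224, and 240 are only emitted when the extended set of GMMA
// tile shapes is opted into via CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED; the guards below keep the
// default build limited to the standard shapes (e.g. N = 192 and 256 in this neighborhood).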
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=U8*U8
struct SM90_64x208x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN S32+=U8*U8
struct SM90_64x208x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[104];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=U8*U8
struct SM90_64x224x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN S32+=U8*U8
struct SM90_64x224x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[112];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=U8*U8
struct SM90_64x240x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN S32+=U8*U8
struct SM90_64x240x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[120];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=U8*U8
struct SM90_64x256x32_S32U8U8_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.u8 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8U8_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN S32+=U8*U8
struct SM90_64x256x32_S32U8U8_RS_TN_SATURATE
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[128];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
uint32_t & d000, uint32_t & d001, uint32_t & d002, uint32_t & d003,
uint32_t & d004, uint32_t & d005, uint32_t & d006, uint32_t & d007,
uint32_t & d008, uint32_t & d009, uint32_t & d010, uint32_t & d011,
uint32_t & d012, uint32_t & d013, uint32_t & d014, uint32_t & d015,
uint32_t & d016, uint32_t & d017, uint32_t & d018, uint32_t & d019,
uint32_t & d020, uint32_t & d021, uint32_t & d022, uint32_t & d023,
uint32_t & d024, uint32_t & d025, uint32_t & d026, uint32_t & d027,
uint32_t & d028, uint32_t & d029, uint32_t & d030, uint32_t & d031,
uint32_t & d032, uint32_t & d033, uint32_t & d034, uint32_t & d035,
uint32_t & d036, uint32_t & d037, uint32_t & d038, uint32_t & d039,
uint32_t & d040, uint32_t & d041, uint32_t & d042, uint32_t & d043,
uint32_t & d044, uint32_t & d045, uint32_t & d046, uint32_t & d047,
uint32_t & d048, uint32_t & d049, uint32_t & d050, uint32_t & d051,
uint32_t & d052, uint32_t & d053, uint32_t & d054, uint32_t & d055,
uint32_t & d056, uint32_t & d057, uint32_t & d058, uint32_t & d059,
uint32_t & d060, uint32_t & d061, uint32_t & d062, uint32_t & d063,
uint32_t & d064, uint32_t & d065, uint32_t & d066, uint32_t & d067,
uint32_t & d068, uint32_t & d069, uint32_t & d070, uint32_t & d071,
uint32_t & d072, uint32_t & d073, uint32_t & d074, uint32_t & d075,
uint32_t & d076, uint32_t & d077, uint32_t & d078, uint32_t & d079,
uint32_t & d080, uint32_t & d081, uint32_t & d082, uint32_t & d083,
uint32_t & d084, uint32_t & d085, uint32_t & d086, uint32_t & d087,
uint32_t & d088, uint32_t & d089, uint32_t & d090, uint32_t & d091,
uint32_t & d092, uint32_t & d093, uint32_t & d094, uint32_t & d095,
uint32_t & d096, uint32_t & d097, uint32_t & d098, uint32_t & d099,
uint32_t & d100, uint32_t & d101, uint32_t & d102, uint32_t & d103,
uint32_t & d104, uint32_t & d105, uint32_t & d106, uint32_t & d107,
uint32_t & d108, uint32_t & d109, uint32_t & d110, uint32_t & d111,
uint32_t & d112, uint32_t & d113, uint32_t & d114, uint32_t & d115,
uint32_t & d116, uint32_t & d117, uint32_t & d118, uint32_t & d119,
uint32_t & d120, uint32_t & d121, uint32_t & d122, uint32_t & d123,
uint32_t & d124, uint32_t & d125, uint32_t & d126, uint32_t & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.s32.u8.u8.satfinite "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p;\n"
"}\n"
: "+r"(d000), "+r"(d001), "+r"(d002), "+r"(d003),
"+r"(d004), "+r"(d005), "+r"(d006), "+r"(d007),
"+r"(d008), "+r"(d009), "+r"(d010), "+r"(d011),
"+r"(d012), "+r"(d013), "+r"(d014), "+r"(d015),
"+r"(d016), "+r"(d017), "+r"(d018), "+r"(d019),
"+r"(d020), "+r"(d021), "+r"(d022), "+r"(d023),
"+r"(d024), "+r"(d025), "+r"(d026), "+r"(d027),
"+r"(d028), "+r"(d029), "+r"(d030), "+r"(d031),
"+r"(d032), "+r"(d033), "+r"(d034), "+r"(d035),
"+r"(d036), "+r"(d037), "+r"(d038), "+r"(d039),
"+r"(d040), "+r"(d041), "+r"(d042), "+r"(d043),
"+r"(d044), "+r"(d045), "+r"(d046), "+r"(d047),
"+r"(d048), "+r"(d049), "+r"(d050), "+r"(d051),
"+r"(d052), "+r"(d053), "+r"(d054), "+r"(d055),
"+r"(d056), "+r"(d057), "+r"(d058), "+r"(d059),
"+r"(d060), "+r"(d061), "+r"(d062), "+r"(d063),
"+r"(d064), "+r"(d065), "+r"(d066), "+r"(d067),
"+r"(d068), "+r"(d069), "+r"(d070), "+r"(d071),
"+r"(d072), "+r"(d073), "+r"(d074), "+r"(d075),
"+r"(d076), "+r"(d077), "+r"(d078), "+r"(d079),
"+r"(d080), "+r"(d081), "+r"(d082), "+r"(d083),
"+r"(d084), "+r"(d085), "+r"(d086), "+r"(d087),
"+r"(d088), "+r"(d089), "+r"(d090), "+r"(d091),
"+r"(d092), "+r"(d093), "+r"(d094), "+r"(d095),
"+r"(d096), "+r"(d097), "+r"(d098), "+r"(d099),
"+r"(d100), "+r"(d101), "+r"(d102), "+r"(d103),
"+r"(d104), "+r"(d105), "+r"(d106), "+r"(d107),
"+r"(d108), "+r"(d109), "+r"(d110), "+r"(d111),
"+r"(d112), "+r"(d113), "+r"(d114), "+r"(d115),
"+r"(d116), "+r"(d117), "+r"(d118), "+r"(d119),
"+r"(d120), "+r"(d121), "+r"(d122), "+r"(d123),
"+r"(d124), "+r"(d125), "+r"(d126), "+r"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_S32U8U8_RS_TN_SATURATE without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
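//
// The atoms below cover FP8 inputs (e4m3 times e4m3) accumulating into F16 or F32. Relative to
// the integer atoms above they add two template parameters, scaleA and scaleB (GMMA::ScaleIn),
// which are passed to the PTX instruction as immediate operands and select optional negation of
// the A and B inputs. As everywhere in this file, scale_D feeds the predicate p: with
// GMMA::ScaleOut::Zero the instruction computes D = A*B instead of D = A*B + D.
//
// Illustrative usage sketch (not part of this header): it assumes desc_a and desc_b are valid
// 64-bit shared-memory matrix descriptors built by the surrounding CuTe machinery, and that the
// caller performs the usual warpgroup fencing/synchronization around the mma_async.
//
//   float d0 = 0.f, d1 = 0.f, d2 = 0.f, d3 = 0.f;           // this thread's accumulator fragment
//   SM90_64x8x32_F32E4M3E4M3_SS_TN<>::fma(desc_a, desc_b,   // A and B both read via descriptors
//                                         d0, d1, d2, d3,
//                                         GMMA::ScaleOut::One);  // accumulate into D
//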
// GMMA 64x8x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %4, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f16.e4m3.e4m3 "
"{%0, %1},"
" %2,"
" %3,"
" p, %5, %6;\n"
"}\n"
: "+r"(d0), "+r"(d1)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
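// The _RS_TN variants differ from the _SS_TN atoms only in how A is sourced: the A fragment is
// supplied as four 32-bit registers (a0..a3) held by the calling thread, while B is still read
// through the shared-memory descriptor desc_b. Accumulator layout and scaling behavior match the
// corresponding _SS_TN atom.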
// GMMA 64x8x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %7, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f16.e4m3.e4m3 "
"{%0, %1},"
"{%2, %3, %4, %5},"
" %6,"
" p, %8, %9;\n"
"}\n"
: "+r"(d0), "+r"(d1)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p, %7, %8;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p, %10, %11;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p, %7, %8;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p, %10, %11;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p, %11, %12;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p, %14, %15;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p, %11, %12;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p, %14, %15;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p, %19, %20;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p, %22, %23;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
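// The atoms guarded by CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED below cover the
// less common N tile sizes (48, 80, 112, 144, ...). They are compiled only when
// that macro is defined, presumably to keep default build times in check; the
// unguarded shapes (N = 32, 64, 96, 128, ...) are always available.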
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[12];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %14, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11},"
" %12,"
" %13,"
" p, %15, %16;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[12];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %17, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11},"
"{%12, %13, %14, %15},"
" %16,"
" p, %18, %19;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p, %27, %28;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p, %30, %31;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p, %19, %20;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p, %22, %23;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p, %35, %36;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p, %38, %39;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[20];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %22, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19},"
" %20,"
" %21,"
" p, %23, %24;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[20];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %25, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19},"
"{%20, %21, %22, %23},"
" %24,"
" p, %26, %27;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p, %43, %44;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p, %46, %47;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p, %27, %28;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p, %30, %31;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p, %51, %52;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p, %54, %55;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[28];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %30, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27},"
" %28,"
" %29,"
" p, %31, %32;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[28];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %33, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27},"
"{%28, %29, %30, %31},"
" %32,"
" p, %34, %35;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p, %59, %60;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p, %62, %63;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p, %35, %36;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p, %38, %39;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p, %67, %68;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p, %70, %71;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[36];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %38, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35},"
" %36,"
" %37,"
" p, %39, %40;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[36];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %41, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35},"
"{%36, %37, %38, %39},"
" %40,"
" p, %42, %43;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p, %75, %76;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p, %78, %79;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
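//
// Usage sketch (illustrative only, not part of this header): these structs are normally not
// called directly. They are wrapped in an MMA_Atom and tiled, and the warpgroup fence /
// commit / wait calls are handled by the surrounding mainloop. The snippet below assumes the
// cute:: helpers named in it (make_tiled_mma, gemm, warpgroup_arrive, warpgroup_commit_batch,
// warpgroup_wait, warpgroup_fence_operand) are available in the including translation unit,
// and uses hypothetical tensor names (tCsA, tCsB, tCrC) for the partitioned operands.
//
//   #include <cute/tensor.hpp>
//   #include <cute/atom/mma_atom.hpp>
//   using namespace cute;
//
//   // Build a warpgroup-wide tiled MMA from one of the FP8 atoms defined above.
//   using AtomFP8 = SM90_64x144x32_F32E4M3E4M3_SS_TN<>;   // one warpgroup, 64x144x32 tile
//   auto tiled_mma = make_tiled_mma(AtomFP8{});
//
//   // Inside the mainloop, per k-block:
//   //   warpgroup_fence_operand(tCrC);
//   //   warpgroup_arrive();                               // wgmma.fence
//   //   gemm(tiled_mma, tCsA(_,_,k), tCsB(_,_,k), tCrC);  // issues wgmma.mma_async
//   //   warpgroup_commit_batch();
//   //   warpgroup_wait<0>();
//   //   warpgroup_fence_operand(tCrC);
//
////////////////////////////////////////////////////////////////////////////////////////////////////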
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p, %43, %44;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p, %46, %47;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p, %83, %84;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p, %86, %87;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[44];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %46, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43},"
" %44,"
" %45,"
" p, %47, %48;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[44];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %49, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43},"
"{%44, %45, %46, %47},"
" %48,"
" p, %50, %51;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p, %91, %92;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p, %94, %95;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
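//
// Note: unlike the surrounding 64x144/160/176/208/224 atoms, the m64n192k32 atoms below are not
// wrapped in CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED, so they are always compiled in; the
// neighboring tile widths are opt-in via that macro.
//
////////////////////////////////////////////////////////////////////////////////////////////////////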
// GMMA 64x192x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p, %51, %52;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p, %54, %55;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p, %99, %100;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p, %102, %103;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[52];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %54, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51},"
" %52,"
" %53,"
" p, %55, %56;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[52];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %57, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51},"
"{%52, %53, %54, %55},"
" %56,"
" p, %58, %59;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p, %107, %108;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p, %110, %111;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
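//
// Worked example of the operand numbering, using the m64n208k32 F32 RS atom above: the 104 float
// accumulators occupy asm operands %0..%103, the four A registers a000..a003 are %104..%107, the
// B descriptor is %108, and the trailing inputs are scale_D (%109, tested by the setp), scaleA
// (%110), and scaleB (%111). The SS variant replaces the four A registers with a single
// descriptor, shifting the trailing operands down accordingly (its setp tests %106).
//
////////////////////////////////////////////////////////////////////////////////////////////////////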
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p, %59, %60;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p, %62, %63;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p, %115, %116;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p, %118, %119;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[60];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %62, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59},"
" %60,"
" %61,"
" p, %63, %64;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[60];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %65, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59},"
"{%60, %61, %62, %63},"
" %64,"
" p, %66, %67;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p, %123, %124;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p, %126, %127;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
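// Note on the CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED guard: the 64x224x32 and
// 64x240x32 atoms above (like the 64x48x32 and the other conditionally
// compiled N values further below) are only built when that macro is defined,
// while shapes such as 64x8x32, 64x16x32, 64x32x32 and the 64x256x32 atoms
// that follow are always available. A minimal sketch, assuming the macro is
// intended to be supplied by the build rather than derived automatically:
//
//   // either on the compiler command line
//   //   nvcc -arch=sm_90a -DCUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED ...
//   // or before including the CuTe MMA headers
//   //   #define CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED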
// GMMA 64x256x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F16E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p, %67, %68;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F16+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F16E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f16.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p, %70, %71;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F32E4M3E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p, %131, %132;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E4M3E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F32+=E4M3*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F32E4M3E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f32.e4m3.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p, %134, %135;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E4M3E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
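// Operand numbering in the asm blocks of these atoms follows one mechanical
// pattern: with R accumulator registers, the read-write outputs are
// %0..%(R-1) and the inputs continue the numbering. For the SS atoms desc_a
// is %R and desc_b is %(R+1), the predicate source for scale_D is %(R+2), and
// scaleA/scaleB are the immediates %(R+3)/%(R+4); for the RS atoms the four A
// fragments occupy %R..%(R+3) instead of desc_a, shifting the rest by three.
// Worked example for SM90_64x256x32_F32E4M3E4M3_SS_TN above: R = 128
// accumulators (%0..%127), desc_a = %128, desc_b = %129, so the instruction
// "setp.ne.b32 p, %130, 0;" tests scale_D and %131/%132 carry scaleA/scaleB.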
// GMMA 64x8x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %4, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f16.e4m3.e5m2 "
"{%0, %1},"
" %2,"
" %3,"
" p, %5, %6;\n"
"}\n"
: "+r"(d0), "+r"(d1)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %7, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f16.e4m3.e5m2 "
"{%0, %1},"
"{%2, %3, %4, %5},"
" %6,"
" p, %8, %9;\n"
"}\n"
: "+r"(d0), "+r"(d1)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p, %7, %8;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
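// Illustrative usage sketch (not part of the original header, kept out of
// compilation): how the 64x8x32 F32+=E4M3*E5M2 SS atom above might be invoked
// directly. The shared-memory matrix descriptors are assumed to have been
// built elsewhere, and the warpgroup fence/commit/wait sequencing that
// wgmma.mma_async requires is assumed to be handled by the caller (in
// practice, by the higher-level CuTe/CUTLASS machinery).
#if 0
CUTE_HOST_DEVICE void
example_sm90_64x8x32_f32_e4m3_e5m2_ss(uint64_t desc_a, uint64_t desc_b)
{
  // CRegisters = float[4]: four per-thread accumulator registers.
  float d0 = 0.f, d1 = 0.f, d2 = 0.f, d3 = 0.f;
  // ScaleOut::One accumulates on top of the existing d0..d3 contents;
  // ScaleOut::Zero would discard them.
  SM90_64x8x32_F32E4M3E5M2_SS_TN<>::fma(desc_a, desc_b,
                                        d0, d1, d2, d3,
                                        GMMA::ScaleOut::One);
}
#endif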
// GMMA 64x8x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p, %10, %11;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
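// Companion sketch for the RS variant above (again not part of the original
// header): identical to the SS case except that the A operand is supplied
// from registers instead of a shared-memory descriptor -- ARegisters =
// uint32_t[4], four 32-bit registers each packing four e4m3 values. How those
// registers are populated is outside the scope of this sketch.
#if 0
CUTE_HOST_DEVICE void
example_sm90_64x8x32_f32_e4m3_e5m2_rs(uint32_t a0, uint32_t a1,
                                      uint32_t a2, uint32_t a3,
                                      uint64_t desc_b)
{
  float d0 = 0.f, d1 = 0.f, d2 = 0.f, d3 = 0.f;
  SM90_64x8x32_F32E4M3E5M2_RS_TN<>::fma(a0, a1, a2, a3, desc_b,
                                        d0, d1, d2, d3,
                                        GMMA::ScaleOut::One);
}
#endif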
// GMMA 64x16x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p, %7, %8;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p, %10, %11;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p, %11, %12;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p, %14, %15;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p, %11, %12;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p, %14, %15;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p, %19, %20;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p, %22, %23;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
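//
// Minimal usage sketch (illustrative only, not part of this header). It
// assumes desc_a and desc_b are valid GMMA shared-memory descriptors built
// elsewhere, and that the warpgroup fence/commit/wait helpers shipped with
// these ops are in scope. Passing GMMA::ScaleOut::Zero on the first
// k-block initializes the accumulators instead of adding to them.
//
//   uint32_t d[8] = {0};                    // 64x32 F16 accumulators (packed half2)
//   cute::warpgroup_arrive();
//   SM90_64x32x32_F16E4M3E5M2_SS_TN<>::fma(
//       desc_a, desc_b,
//       d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],
//       GMMA::ScaleOut::Zero);              // first k-block: overwrite D
//   cute::warpgroup_commit_batch();
//   cute::warpgroup_wait<0>();
//
// Most code reaches these structs indirectly through cute::MMA_Atom and
// cute::TiledMMA rather than calling fma() directly.
//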
////////////////////////////////////////////////////////////////////////////////////////////////////
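// In this family, shapes whose N is not a multiple of 32 (48, 80, 112,
// 144, ...) are only defined when CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED is
// set; the N = 32, 64, 96, 128 variants are always defined.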
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[12];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %14, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11},"
" %12,"
" %13,"
" p, %15, %16;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[12];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %17, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11},"
"{%12, %13, %14, %15},"
" %16,"
" p, %18, %19;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p, %27, %28;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p, %30, %31;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p, %19, %20;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p, %22, %23;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p, %35, %36;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p, %38, %39;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[20];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %22, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19},"
" %20,"
" %21,"
" p, %23, %24;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[20];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %25, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19},"
"{%20, %21, %22, %23},"
" %24,"
" p, %26, %27;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p, %43, %44;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p, %46, %47;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p, %27, %28;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p, %30, %31;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p, %51, %52;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p, %54, %55;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[28];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %30, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27},"
" %28,"
" %29,"
" p, %31, %32;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[28];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %33, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27},"
"{%28, %29, %30, %31},"
" %32,"
" p, %34, %35;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p, %59, %60;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p, %62, %63;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p, %35, %36;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p, %38, %39;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p, %67, %68;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p, %70, %71;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[36];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %38, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35},"
" %36,"
" %37,"
" p, %39, %40;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[36];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %41, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35},"
"{%36, %37, %38, %39},"
" %40,"
" p, %42, %43;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p, %75, %76;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p, %78, %79;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p, %43, %44;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p, %46, %47;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p, %83, %84;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p, %86, %87;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[44];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %46, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43},"
" %44,"
" %45,"
" p, %47, %48;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[44];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %49, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43},"
"{%44, %45, %46, %47},"
" %48,"
" p, %50, %51;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p, %91, %92;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p, %94, %95;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
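// NOTE (explanatory comment, not part of the original generated source): unlike the
// neighbouring N=144..176 and N=208+ tiles, the 64x192x32 atoms below are not wrapped in
// CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED, so they are compiled whenever this header is
// included; only the SM90A architecture guard inside fma() applies.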
// GMMA 64x192x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p, %51, %52;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p, %54, %55;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p, %99, %100;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p, %102, %103;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[52];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %54, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51},"
" %52,"
" %53,"
" p, %55, %56;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[52];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %57, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51},"
"{%52, %53, %54, %55},"
" %56,"
" p, %58, %59;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p, %107, %108;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p, %110, %111;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p, %59, %60;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p, %62, %63;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p, %115, %116;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p, %118, %119;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[60];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %62, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59},"
" %60,"
" %61,"
" p, %63, %64;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[60];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %65, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59},"
"{%60, %61, %62, %63},"
" %64,"
" p, %66, %67;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p, %123, %124;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p, %126, %127;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F16E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p, %67, %68;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
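// Accumulator sizing for the 64x256 shapes: the 128 threads of a warpgroup jointly hold
// the full 64x256 D tile, i.e. 64*256/128 = 128 elements per thread. For the f16
// accumulators above that is 128 halves packed into 64 uint32_t (CRegisters =
// uint32_t[64]); for the f32 variants below it is 128 floats (CRegisters = float[128]).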
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F16+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F16E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f16.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p, %70, %71;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F32E4M3E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p, %131, %132;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E4M3E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F32+=E4M3*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F32E4M3E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f32.e4m3.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p, %134, %135;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E4M3E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
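// Naming convention for the FP8 GMMA atoms that follow:
//   SM90_64xNx32_<Acc><TypeA><TypeB>_<SrcA><SrcB>_TN
//   - Acc     : f16 or f32 accumulator (D), as encoded in the wgmma mnemonic.
//   - TypeA/B : FP8 operand formats; in this family A is e5m2 and B is e4m3.
//   - SS      : A and B are both read from shared memory via 64-bit matrix descriptors.
//   - RS      : A is supplied from registers (four uint32_t), B via a descriptor.
//   - TN      : K-major A and B, the only operand layout FP8 wgmma supports.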
// GMMA 64x8x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %4, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f16.e5m2.e4m3 "
"{%0, %1},"
" %2,"
" %3,"
" p, %5, %6;\n"
"}\n"
: "+r"(d0), "+r"(d1)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
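// Illustrative sketch only (not part of the library API): how the SS atom above could be
// invoked directly, assuming `desc_a` and `desc_b` are valid GMMA shared-memory
// descriptors built elsewhere. scale_D selects between overwriting and accumulating the
// destination; the `setp.ne.b32 p, ...` above turns it into the wgmma scale-d predicate.
//
//   uint32_t acc[2] = {0u, 0u};   // 4 packed f16 accumulators per thread (64*8/128)
//   // First k-block: D = A*B (predicate false, old accumulator is not sourced).
//   SM90_64x8x32_F16E5M2E4M3_SS_TN<>::fma(desc_a, desc_b, acc[0], acc[1],
//                                         GMMA::ScaleOut::Zero);
//   // Remaining k-blocks: D += A*B.
//   SM90_64x8x32_F16E5M2E4M3_SS_TN<>::fma(desc_a, desc_b, acc[0], acc[1],
//                                         GMMA::ScaleOut::One);
//
// In real kernels these calls are issued through the MMA-atom machinery and must be
// wrapped in the warpgroup fence / commit / wait protocol, which is omitted here.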
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %7, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f16.e5m2.e4m3 "
"{%0, %1},"
"{%2, %3, %4, %5},"
" %6,"
" p, %8, %9;\n"
"}\n"
: "+r"(d0), "+r"(d1)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
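// The RS variant differs from the SS atom above only in how A is sourced: the four
// uint32_t arguments a0..a3 carry this thread's register fragment of the e5m2 A tile
// (constraint "r"), while B is still addressed through a shared-memory descriptor
// (constraint "l"). A minimal call sketch, with `a0..a3`, `desc_b`, and `acc` assumed to
// have been prepared by the caller:
//
//   uint32_t acc[2] = {0u, 0u};
//   SM90_64x8x32_F16E5M2E4M3_RS_TN<>::fma(a0, a1, a2, a3, desc_b, acc[0], acc[1],
//                                         GMMA::ScaleOut::Zero);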
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p, %7, %8;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p, %10, %11;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p, %7, %8;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p, %10, %11;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p, %11, %12;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p, %14, %15;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p, %11, %12;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p, %14, %15;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p, %19, %20;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p, %22, %23;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
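// The atoms below that are wrapped in CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED cover the
// less common N extents (48, 80, 112, 144, ...) and are compiled only when that macro
// is defined; the unguarded shapes in this section (N = 16, 32, 64, 96, 128) are always
// available.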
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[12];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %14, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11},"
" %12,"
" %13,"
" p, %15, %16;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[12];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %17, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11},"
"{%12, %13, %14, %15},"
" %16,"
" p, %18, %19;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p, %27, %28;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p, %30, %31;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p, %19, %20;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p, %22, %23;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p, %35, %36;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p, %38, %39;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[20];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %22, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19},"
" %20,"
" %21,"
" p, %23, %24;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[20];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %25, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19},"
"{%20, %21, %22, %23},"
" %24,"
" p, %26, %27;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p, %43, %44;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p, %46, %47;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p, %27, %28;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p, %30, %31;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p, %51, %52;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p, %54, %55;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[28];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %30, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27},"
" %28,"
" %29,"
" p, %31, %32;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[28];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %33, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27},"
"{%28, %29, %30, %31},"
" %32,"
" p, %34, %35;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p, %59, %60;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p, %62, %63;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p, %35, %36;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p, %38, %39;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p, %67, %68;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p, %70, %71;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[36];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %38, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35},"
" %36,"
" %37,"
" p, %39, %40;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[36];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %41, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35},"
"{%36, %37, %38, %39},"
" %40,"
" p, %42, %43;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p, %75, %76;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p, %78, %79;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p, %43, %44;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p, %46, %47;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p, %83, %84;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p, %86, %87;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[44];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %46, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43},"
" %44,"
" %45,"
" p, %47, %48;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[44];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %49, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43},"
"{%44, %45, %46, %47},"
" %48,"
" p, %50, %51;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p, %91, %92;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p, %94, %95;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
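// The SM90_64xNx32 FP8 atoms in this file share one pattern: the _SS variants read both
// A and B from shared memory through 64-bit GMMA descriptors (desc_a, desc_b), while the
// _RS variants read A from registers (ARegisters) and only B through a descriptor. The
// trailing scale_D argument feeds the `setp` that produces predicate `p`: with
// GMMA::ScaleOut::Zero the wgmma instruction overwrites the accumulators (D = A*B), with
// ScaleOut::One it accumulates (D = A*B + D). scaleA/scaleB are compile-time immediates
// ("n" constraints) selecting optional negation of the A/B inputs.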
// GMMA 64x192x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p, %51, %52;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p, %54, %55;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p, %99, %100;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
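// Illustrative sketch only (not part of the generated atom set): callers normally do not
// invoke fma() by hand; cute::MMA_Atom<SM90_64x192x32_F32E5M2E4M3_SS_TN<>> expands the
// per-thread accumulator fragment from CRegisters automatically. A direct call, assuming
// desc_a/desc_b are valid GMMA shared-memory descriptors built elsewhere and with the
// surrounding wgmma fence/commit/wait synchronization omitted, would have to spell out
// all 96 per-thread accumulator registers explicitly, e.g.
//
//   float acc[96] = {};   // this thread's share of the 64x192 f32 accumulator
//   SM90_64x192x32_F32E5M2E4M3_SS_TN<>::fma(
//       desc_a, desc_b,                        // shared-memory descriptors for A and B
//       acc[0], acc[1], /* ..., */ acc[95],    // all 96 CRegisters, listed one by one
//       GMMA::ScaleOut::One);                  // accumulate into acc rather than overwrite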
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p, %102, %103;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[52];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %54, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51},"
" %52,"
" %53,"
" p, %55, %56;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[52];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %57, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51},"
"{%52, %53, %54, %55},"
" %56,"
" p, %58, %59;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p, %107, %108;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p, %110, %111;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
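// Note on the shared inline-asm convention used by every wgmma wrapper in this family (an
// explanatory comment only, summarizing what the asm blocks below already do): the accumulator
// fragment is bound with read-write constraints ("+f" for f32 accumulators, "+r" for packed f16
// pairs), the shared-memory matrix descriptors with 64-bit "l" constraints, and the runtime
// scale_D argument with an "r" constraint that `setp.ne.b32` converts into predicate `p`. Per the
// PTX wgmma scale-d semantics, that predicate selects D = A*B + D (scale_D != 0) versus
// D = A*B (scale_D == 0). The scaleA/scaleB template parameters are compile-time values and are
// therefore emitted as immediate "n" operands.
////////////////////////////////////////////////////////////////////////////////////////////////////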
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p, %59, %60;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p, %62, %63;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p, %115, %116;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p, %118, %119;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[60];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %62, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59},"
" %60,"
" %61,"
" p, %63, %64;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[60];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %65, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59},"
"{%60, %61, %62, %63},"
" %64,"
" p, %66, %67;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p, %123, %124;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p, %126, %127;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
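// The 64x256x32 ops below are compiled unconditionally; only the extended shapes above, such as
// the 64x208/224/240 variants, are gated behind CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED.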
// GMMA 64x256x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F16E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p, %67, %68;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F16+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F16E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f16.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p, %70, %71;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F32E5M2E4M3_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p, %131, %132;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E5M2E4M3_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F32+=E5M2*E4M3
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F32E5M2E4M3_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f32.e5m2.e4m3 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p, %134, %135;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E5M2E4M3_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
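// Illustrative (commented-out) sketch of how one of the ops above is normally consumed through
// CuTe's tiled-MMA machinery rather than by calling fma() directly. The op struct is defined in
// this file; the MMA_Atom/make_tiled_mma usage and the presence of a matching MMA_Traits
// specialization are assumptions about the surrounding CuTe API, not something established here.
//
//   using Atom = cute::MMA_Atom<cute::SM90_64x256x32_F32E5M2E4M3_SS_TN<>>;
//   auto tiled_mma = cute::make_tiled_mma(Atom{});
//   // cute::gemm(tiled_mma, ...) then lowers each tile step to the fma() wrapper above,
//   // feeding it GMMA shared-memory descriptors for A and B.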
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %4, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f16.e5m2.e5m2 "
"{%0, %1},"
" %2,"
" %3,"
" p, %5, %6;\n"
"}\n"
: "+r"(d0), "+r"(d1)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[2];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %7, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f16.e5m2.e5m2 "
"{%0, %1},"
"{%2, %3, %4, %5},"
" %6,"
" p, %8, %9;\n"
"}\n"
: "+r"(d0), "+r"(d1)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p, %7, %8;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
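// Minimal direct-call sketch for the SS op above (hedged illustration; the descriptor values and
// variable names are placeholders, and the warpgroup fencing mentioned in the comments is assumed
// to be handled by the caller, since it is not part of this wrapper):
//
//   uint64_t desc_a = /* GMMA descriptor of the A tile in shared memory */ 0;
//   uint64_t desc_b = /* GMMA descriptor of the B tile in shared memory */ 0;
//   float d0 = 0.f, d1 = 0.f, d2 = 0.f, d3 = 0.f;  // this thread's accumulator fragment
//   SM90_64x8x32_F32E5M2E5M2_SS_TN<>::fma(desc_a, desc_b, d0, d1, d2, d3,
//                                         GMMA::ScaleOut::One);
//   // wgmma is warpgroup-collective: all 128 threads of the warpgroup issue this together, and
//   // the async operation must still be committed and waited on before d0..d3 are read.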
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x8x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x8x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n8k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p, %10, %11;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x8x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %6, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3},"
" %4,"
" %5,"
" p, %7, %8;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[4];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %9, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
" %8,"
" p, %10, %11;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p, %11, %12;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x16x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x16x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
float & d0, float & d1, float & d2, float & d3,
float & d4, float & d5, float & d6, float & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n16k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p, %14, %15;\n"
"}\n"
: "+f"(d0), "+f"(d1), "+f"(d2), "+f"(d3),
"+f"(d4), "+f"(d5), "+f"(d6), "+f"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x16x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %10, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
" %8,"
" %9,"
" p, %11, %12;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[8];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
uint64_t const& desc_b,
uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
uint32_t & d4, uint32_t & d5, uint32_t & d6, uint32_t & d7,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %13, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7},"
"{%8, %9, %10, %11},"
" %12,"
" p, %14, %15;\n"
"}\n"
: "+r"(d0), "+r"(d1), "+r"(d2), "+r"(d3),
"+r"(d4), "+r"(d5), "+r"(d6), "+r"(d7)
: "r"(a0), "r"(a1), "r"(a2), "r"(a3),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p, %19, %20;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x32x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x32x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n32k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p, %22, %23;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x32x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
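// The 64xNx32 E5M2 shapes below with N = 48, 80, 112, 144 (the odd multiples
// of 16) are wrapped in CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED and are
// therefore opt-in: they compile only when that macro is defined by the
// including translation unit, leaving the unguarded widths (16, 32, 64, 96,
// 128, ...) available by default.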
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[12];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %14, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11},"
" %12,"
" %13,"
" p, %15, %16;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[12];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %17, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11},"
"{%12, %13, %14, %15},"
" %16,"
" p, %18, %19;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p, %27, %28;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x48x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x48x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n48k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p, %30, %31;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x48x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %18, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
" %16,"
" %17,"
" p, %19, %20;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[16];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %21, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15},"
"{%16, %17, %18, %19},"
" %20,"
" p, %22, %23;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p, %35, %36;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x64x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x64x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n64k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p, %38, %39;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x64x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[20];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %22, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19},"
" %20,"
" %21,"
" p, %23, %24;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[20];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %25, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19},"
"{%20, %21, %22, %23},"
" %24,"
" p, %26, %27;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p, %43, %44;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x80x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x80x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n80k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p, %46, %47;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x80x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %26, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
" %24,"
" %25,"
" p, %27, %28;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[24];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %29, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23},"
"{%24, %25, %26, %27},"
" %28,"
" p, %30, %31;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p, %51, %52;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x96x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x96x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n96k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p, %54, %55;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x96x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[28];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %30, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27},"
" %28,"
" %29,"
" p, %31, %32;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[28];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %33, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27},"
"{%28, %29, %30, %31},"
" %32,"
" p, %34, %35;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p, %59, %60;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x112x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x112x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n112k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p, %62, %63;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x112x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %34, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
" %32,"
" %33,"
" p, %35, %36;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[32];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %37, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31},"
"{%32, %33, %34, %35},"
" %36,"
" p, %38, %39;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p, %67, %68;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x128x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x128x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n128k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p, %70, %71;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x128x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[36];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %38, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35},"
" %36,"
" %37,"
" p, %39, %40;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[36];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %41, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35},"
"{%36, %37, %38, %39},"
" %40,"
" p, %42, %43;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %74, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
" %72,"
" %73,"
" p, %75, %76;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
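// Added explanatory note: accumulator register counts follow directly from the
// tile shape. A 64xN tile of D is spread across the 128 threads of a
// warpgroup, i.e. N/2 elements per thread. With f16 accumulators two halves
// pack into each 32-bit register ("+r" constraints), giving N/4 uint32_t; with
// f32 accumulators each element occupies its own register ("+f" constraints),
// giving N/2 floats. Hence uint32_t[36] and float[72] for the N=144 atoms
// above, and uint32_t[40]/float[80] for the N=160 atoms further down.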
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x144x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x144x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[72];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %77, 0;\n"
"wgmma.mma_async.sync.aligned.m64n144k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71},"
"{%72, %73, %74, %75},"
" %76,"
" p, %78, %79;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x144x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
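// Hypothetical usage sketch (illustrative only, kept in a comment so it is not
// compiled as part of this header): these op structs are normally not called
// directly. They are wrapped in an MMA_Atom / TiledMMA and issued through
// cute::gemm from inside a warpgroup, assuming the matching MMA_Traits
// specialization is available and tCsA/tCsB/tCrC are tensors partitioned by
// the caller in the usual ThrMMA flow:
//
//   auto tiled_mma = cute::make_tiled_mma(
//       cute::SM90_64x144x32_F32E5M2E5M2_SS_TN<>{});
//   cute::warpgroup_arrive();
//   cute::gemm(tiled_mma, tCsA, tCsB, tCrC);  // queue the async wgmma ops
//   cute::warpgroup_commit_batch();
//   cute::warpgroup_wait<0>();                // accumulators valid in tCrC
//
// The partitioning of shared-memory A/B into tCsA/tCsB and the GMMA descriptor
// construction are deliberately omitted here.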
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %42, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
" %40,"
" %41,"
" p, %43, %44;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[40];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %45, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39},"
"{%40, %41, %42, %43},"
" %44,"
" p, %46, %47;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %82, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
" %80,"
" %81,"
" p, %83, %84;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x160x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x160x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[80];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %85, 0;\n"
"wgmma.mma_async.sync.aligned.m64n160k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79},"
"{%80, %81, %82, %83},"
" %84,"
" p, %86, %87;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x160x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[44];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %46, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43},"
" %44,"
" %45,"
" p, %47, %48;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[44];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %49, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43},"
"{%44, %45, %46, %47},"
" %48,"
" p, %50, %51;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %90, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
" %88,"
" %89,"
" p, %91, %92;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x176x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x176x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[88];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %93, 0;\n"
"wgmma.mma_async.sync.aligned.m64n176k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87},"
"{%88, %89, %90, %91},"
" %92,"
" p, %94, %95;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x176x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
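// Note: the 64x192 atoms that follow are emitted without the
// CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED guard used by the surrounding shapes,
// presumably because N=192 is one of the tile widths CuTe exposes by default
// while the additional N values are opt-in (explanatory comment, inferred from
// the guards visible in this file).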
// GMMA 64x192x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %50, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
" %48,"
" %49,"
" p, %51, %52;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[48];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %53, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47},"
"{%48, %49, %50, %51},"
" %52,"
" p, %54, %55;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %98, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
" %96,"
" %97,"
" p, %99, %100;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x192x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x192x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[96];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
float & d00, float & d01, float & d02, float & d03,
float & d04, float & d05, float & d06, float & d07,
float & d08, float & d09, float & d10, float & d11,
float & d12, float & d13, float & d14, float & d15,
float & d16, float & d17, float & d18, float & d19,
float & d20, float & d21, float & d22, float & d23,
float & d24, float & d25, float & d26, float & d27,
float & d28, float & d29, float & d30, float & d31,
float & d32, float & d33, float & d34, float & d35,
float & d36, float & d37, float & d38, float & d39,
float & d40, float & d41, float & d42, float & d43,
float & d44, float & d45, float & d46, float & d47,
float & d48, float & d49, float & d50, float & d51,
float & d52, float & d53, float & d54, float & d55,
float & d56, float & d57, float & d58, float & d59,
float & d60, float & d61, float & d62, float & d63,
float & d64, float & d65, float & d66, float & d67,
float & d68, float & d69, float & d70, float & d71,
float & d72, float & d73, float & d74, float & d75,
float & d76, float & d77, float & d78, float & d79,
float & d80, float & d81, float & d82, float & d83,
float & d84, float & d85, float & d86, float & d87,
float & d88, float & d89, float & d90, float & d91,
float & d92, float & d93, float & d94, float & d95,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %101, 0;\n"
"wgmma.mma_async.sync.aligned.m64n192k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95},"
"{%96, %97, %98, %99},"
" %100,"
" p, %102, %103;\n"
"}\n"
: "+f"(d00), "+f"(d01), "+f"(d02), "+f"(d03),
"+f"(d04), "+f"(d05), "+f"(d06), "+f"(d07),
"+f"(d08), "+f"(d09), "+f"(d10), "+f"(d11),
"+f"(d12), "+f"(d13), "+f"(d14), "+f"(d15),
"+f"(d16), "+f"(d17), "+f"(d18), "+f"(d19),
"+f"(d20), "+f"(d21), "+f"(d22), "+f"(d23),
"+f"(d24), "+f"(d25), "+f"(d26), "+f"(d27),
"+f"(d28), "+f"(d29), "+f"(d30), "+f"(d31),
"+f"(d32), "+f"(d33), "+f"(d34), "+f"(d35),
"+f"(d36), "+f"(d37), "+f"(d38), "+f"(d39),
"+f"(d40), "+f"(d41), "+f"(d42), "+f"(d43),
"+f"(d44), "+f"(d45), "+f"(d46), "+f"(d47),
"+f"(d48), "+f"(d49), "+f"(d50), "+f"(d51),
"+f"(d52), "+f"(d53), "+f"(d54), "+f"(d55),
"+f"(d56), "+f"(d57), "+f"(d58), "+f"(d59),
"+f"(d60), "+f"(d61), "+f"(d62), "+f"(d63),
"+f"(d64), "+f"(d65), "+f"(d66), "+f"(d67),
"+f"(d68), "+f"(d69), "+f"(d70), "+f"(d71),
"+f"(d72), "+f"(d73), "+f"(d74), "+f"(d75),
"+f"(d76), "+f"(d77), "+f"(d78), "+f"(d79),
"+f"(d80), "+f"(d81), "+f"(d82), "+f"(d83),
"+f"(d84), "+f"(d85), "+f"(d86), "+f"(d87),
"+f"(d88), "+f"(d89), "+f"(d90), "+f"(d91),
"+f"(d92), "+f"(d93), "+f"(d94), "+f"(d95)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x192x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
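// Added explanatory note: from N=208 upward the F32 atoms name their
// accumulators with three-digit indices (d000, d001, ...) because the register
// count reaches float[104] (= 208/2) and the asm operand indices pass 100; the
// operand pattern itself is unchanged, e.g. scale_D is tested at %106 in the
// SS form below.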
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[52];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %54, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51},"
" %52,"
" %53,"
" p, %55, %56;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[52];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %57, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51},"
"{%52, %53, %54, %55},"
" %56,"
" p, %58, %59;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %106, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
" %104,"
" %105,"
" p, %107, %108;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x208x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x208x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[104];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %109, 0;\n"
"wgmma.mma_async.sync.aligned.m64n208k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103},"
"{%104, %105, %106, %107},"
" %108,"
" p, %110, %111;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x208x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %58, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
" %56,"
" %57,"
" p, %59, %60;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[56];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %61, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55},"
"{%56, %57, %58, %59},"
" %60,"
" p, %62, %63;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %114, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
" %112,"
" %113,"
" p, %115, %116;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x224x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x224x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[112];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %117, 0;\n"
"wgmma.mma_async.sync.aligned.m64n224k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111},"
"{%112, %113, %114, %115},"
" %116,"
" p, %118, %119;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x224x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[60];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %62, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59},"
" %60,"
" %61,"
" p, %63, %64;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[60];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %65, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59},"
"{%60, %61, %62, %63},"
" %64,"
" p, %66, %67;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %122, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
" %120,"
" %121,"
" p, %123, %124;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
// GMMA 64x240x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x240x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[120];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %125, 0;\n"
"wgmma.mma_async.sync.aligned.m64n240k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119},"
"{%120, %121, %122, %123},"
" %124,"
" p, %126, %127;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x240x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F16E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %66, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
" %64,"
" %65,"
" p, %67, %68;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F16+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F16E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = uint32_t[64];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a00, uint32_t const& a01, uint32_t const& a02, uint32_t const& a03,
uint64_t const& desc_b,
uint32_t & d00, uint32_t & d01, uint32_t & d02, uint32_t & d03,
uint32_t & d04, uint32_t & d05, uint32_t & d06, uint32_t & d07,
uint32_t & d08, uint32_t & d09, uint32_t & d10, uint32_t & d11,
uint32_t & d12, uint32_t & d13, uint32_t & d14, uint32_t & d15,
uint32_t & d16, uint32_t & d17, uint32_t & d18, uint32_t & d19,
uint32_t & d20, uint32_t & d21, uint32_t & d22, uint32_t & d23,
uint32_t & d24, uint32_t & d25, uint32_t & d26, uint32_t & d27,
uint32_t & d28, uint32_t & d29, uint32_t & d30, uint32_t & d31,
uint32_t & d32, uint32_t & d33, uint32_t & d34, uint32_t & d35,
uint32_t & d36, uint32_t & d37, uint32_t & d38, uint32_t & d39,
uint32_t & d40, uint32_t & d41, uint32_t & d42, uint32_t & d43,
uint32_t & d44, uint32_t & d45, uint32_t & d46, uint32_t & d47,
uint32_t & d48, uint32_t & d49, uint32_t & d50, uint32_t & d51,
uint32_t & d52, uint32_t & d53, uint32_t & d54, uint32_t & d55,
uint32_t & d56, uint32_t & d57, uint32_t & d58, uint32_t & d59,
uint32_t & d60, uint32_t & d61, uint32_t & d62, uint32_t & d63,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %69, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f16.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63},"
"{%64, %65, %66, %67},"
" %68,"
" p, %70, %71;\n"
"}\n"
: "+r"(d00), "+r"(d01), "+r"(d02), "+r"(d03),
"+r"(d04), "+r"(d05), "+r"(d06), "+r"(d07),
"+r"(d08), "+r"(d09), "+r"(d10), "+r"(d11),
"+r"(d12), "+r"(d13), "+r"(d14), "+r"(d15),
"+r"(d16), "+r"(d17), "+r"(d18), "+r"(d19),
"+r"(d20), "+r"(d21), "+r"(d22), "+r"(d23),
"+r"(d24), "+r"(d25), "+r"(d26), "+r"(d27),
"+r"(d28), "+r"(d29), "+r"(d30), "+r"(d31),
"+r"(d32), "+r"(d33), "+r"(d34), "+r"(d35),
"+r"(d36), "+r"(d37), "+r"(d38), "+r"(d39),
"+r"(d40), "+r"(d41), "+r"(d42), "+r"(d43),
"+r"(d44), "+r"(d45), "+r"(d46), "+r"(d47),
"+r"(d48), "+r"(d49), "+r"(d50), "+r"(d51),
"+r"(d52), "+r"(d53), "+r"(d54), "+r"(d55),
"+r"(d56), "+r"(d57), "+r"(d58), "+r"(d59),
"+r"(d60), "+r"(d61), "+r"(d62), "+r"(d63)
: "r"(a00), "r"(a01), "r"(a02), "r"(a03),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F16E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F32E5M2E5M2_SS_TN
{
using DRegisters = void;
using ARegisters = uint64_t[1];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
CUTE_HOST_DEVICE static void
fma(uint64_t const& desc_a,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %130, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
" %128,"
" %129,"
" p, %131, %132;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "l"(desc_a),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E5M2E5M2_SS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// GMMA 64x256x32 TN F32+=E5M2*E5M2
template <
GMMA::ScaleIn scaleA = GMMA::ScaleIn::One,
GMMA::ScaleIn scaleB = GMMA::ScaleIn::One
>
struct SM90_64x256x32_F32E5M2E5M2_RS_TN
{
using DRegisters = void;
using ARegisters = uint32_t[4];
using BRegisters = uint64_t[1];
using CRegisters = float[128];
CUTE_HOST_DEVICE static void
fma(uint32_t const& a000, uint32_t const& a001, uint32_t const& a002, uint32_t const& a003,
uint64_t const& desc_b,
float & d000, float & d001, float & d002, float & d003,
float & d004, float & d005, float & d006, float & d007,
float & d008, float & d009, float & d010, float & d011,
float & d012, float & d013, float & d014, float & d015,
float & d016, float & d017, float & d018, float & d019,
float & d020, float & d021, float & d022, float & d023,
float & d024, float & d025, float & d026, float & d027,
float & d028, float & d029, float & d030, float & d031,
float & d032, float & d033, float & d034, float & d035,
float & d036, float & d037, float & d038, float & d039,
float & d040, float & d041, float & d042, float & d043,
float & d044, float & d045, float & d046, float & d047,
float & d048, float & d049, float & d050, float & d051,
float & d052, float & d053, float & d054, float & d055,
float & d056, float & d057, float & d058, float & d059,
float & d060, float & d061, float & d062, float & d063,
float & d064, float & d065, float & d066, float & d067,
float & d068, float & d069, float & d070, float & d071,
float & d072, float & d073, float & d074, float & d075,
float & d076, float & d077, float & d078, float & d079,
float & d080, float & d081, float & d082, float & d083,
float & d084, float & d085, float & d086, float & d087,
float & d088, float & d089, float & d090, float & d091,
float & d092, float & d093, float & d094, float & d095,
float & d096, float & d097, float & d098, float & d099,
float & d100, float & d101, float & d102, float & d103,
float & d104, float & d105, float & d106, float & d107,
float & d108, float & d109, float & d110, float & d111,
float & d112, float & d113, float & d114, float & d115,
float & d116, float & d117, float & d118, float & d119,
float & d120, float & d121, float & d122, float & d123,
float & d124, float & d125, float & d126, float & d127,
GMMA::ScaleOut const scale_D = GMMA::ScaleOut::One)
{
#if defined(CUTE_ARCH_MMA_SM90A_ENABLED)
asm volatile(
"{\n"
".reg .pred p;\n"
"setp.ne.b32 p, %133, 0;\n"
"wgmma.mma_async.sync.aligned.m64n256k32.f32.e5m2.e5m2 "
"{%0, %1, %2, %3, %4, %5, %6, %7, "
" %8, %9, %10, %11, %12, %13, %14, %15, "
" %16, %17, %18, %19, %20, %21, %22, %23, "
" %24, %25, %26, %27, %28, %29, %30, %31, "
" %32, %33, %34, %35, %36, %37, %38, %39, "
" %40, %41, %42, %43, %44, %45, %46, %47, "
" %48, %49, %50, %51, %52, %53, %54, %55, "
" %56, %57, %58, %59, %60, %61, %62, %63, "
" %64, %65, %66, %67, %68, %69, %70, %71, "
" %72, %73, %74, %75, %76, %77, %78, %79, "
" %80, %81, %82, %83, %84, %85, %86, %87, "
" %88, %89, %90, %91, %92, %93, %94, %95, "
" %96, %97, %98, %99, %100, %101, %102, %103, "
" %104, %105, %106, %107, %108, %109, %110, %111, "
" %112, %113, %114, %115, %116, %117, %118, %119, "
" %120, %121, %122, %123, %124, %125, %126, %127},"
"{%128, %129, %130, %131},"
" %132,"
" p, %134, %135;\n"
"}\n"
: "+f"(d000), "+f"(d001), "+f"(d002), "+f"(d003),
"+f"(d004), "+f"(d005), "+f"(d006), "+f"(d007),
"+f"(d008), "+f"(d009), "+f"(d010), "+f"(d011),
"+f"(d012), "+f"(d013), "+f"(d014), "+f"(d015),
"+f"(d016), "+f"(d017), "+f"(d018), "+f"(d019),
"+f"(d020), "+f"(d021), "+f"(d022), "+f"(d023),
"+f"(d024), "+f"(d025), "+f"(d026), "+f"(d027),
"+f"(d028), "+f"(d029), "+f"(d030), "+f"(d031),
"+f"(d032), "+f"(d033), "+f"(d034), "+f"(d035),
"+f"(d036), "+f"(d037), "+f"(d038), "+f"(d039),
"+f"(d040), "+f"(d041), "+f"(d042), "+f"(d043),
"+f"(d044), "+f"(d045), "+f"(d046), "+f"(d047),
"+f"(d048), "+f"(d049), "+f"(d050), "+f"(d051),
"+f"(d052), "+f"(d053), "+f"(d054), "+f"(d055),
"+f"(d056), "+f"(d057), "+f"(d058), "+f"(d059),
"+f"(d060), "+f"(d061), "+f"(d062), "+f"(d063),
"+f"(d064), "+f"(d065), "+f"(d066), "+f"(d067),
"+f"(d068), "+f"(d069), "+f"(d070), "+f"(d071),
"+f"(d072), "+f"(d073), "+f"(d074), "+f"(d075),
"+f"(d076), "+f"(d077), "+f"(d078), "+f"(d079),
"+f"(d080), "+f"(d081), "+f"(d082), "+f"(d083),
"+f"(d084), "+f"(d085), "+f"(d086), "+f"(d087),
"+f"(d088), "+f"(d089), "+f"(d090), "+f"(d091),
"+f"(d092), "+f"(d093), "+f"(d094), "+f"(d095),
"+f"(d096), "+f"(d097), "+f"(d098), "+f"(d099),
"+f"(d100), "+f"(d101), "+f"(d102), "+f"(d103),
"+f"(d104), "+f"(d105), "+f"(d106), "+f"(d107),
"+f"(d108), "+f"(d109), "+f"(d110), "+f"(d111),
"+f"(d112), "+f"(d113), "+f"(d114), "+f"(d115),
"+f"(d116), "+f"(d117), "+f"(d118), "+f"(d119),
"+f"(d120), "+f"(d121), "+f"(d122), "+f"(d123),
"+f"(d124), "+f"(d125), "+f"(d126), "+f"(d127)
: "r"(a000), "r"(a001), "r"(a002), "r"(a003),
"l"(desc_b),
"r"(int32_t(scale_D)), "n"(int32_t(scaleA)), "n"(int32_t(scaleB)));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_64x256x32_F32E5M2E5M2_RS_TN without CUTE_ARCH_MMA_SM90A_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
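// Illustrative usage sketch for the wrappers above (hypothetical device code; the
// accumulator fragment, the shared-memory matrix descriptors, and the surrounding
// warpgroup fence/commit/wait calls are assumed to be set up by the caller, normally
// through CuTe's MMA atoms rather than by hand):
//
//   uint64_t desc_a = /* GMMA descriptor for the A tile in shared memory */;
//   uint64_t desc_b = /* GMMA descriptor for the B tile in shared memory */;
//   uint32_t acc[64] = {};  // 64 packed f16x2 accumulators for one 64x256 tile
//   SM90_64x256x32_F16E5M2E5M2_SS_TN<>::fma(
//       desc_a, desc_b,
//       acc[0], acc[1], /* ..., */ acc[63],
//       GMMA::ScaleOut::One);  // One: accumulate into C; Zero: overwrite C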
} // namespace cute
// End of file: include/cute/arch/mma_sm90_gmma.hpp
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/container/tuple.hpp>
#include <cute/numeric/integral_constant.hpp>
#include <cute/algorithm/functional.hpp>
#include <cute/algorithm/tuple_algorithms.hpp>
#include <cute/util/type_traits.hpp>
namespace cute
{
template <class... T>
struct ArithmeticTuple : tuple<T...>
{
template <class... U>
CUTE_HOST_DEVICE constexpr
ArithmeticTuple(ArithmeticTuple<U...> const& u)
: tuple<T...>(static_cast<tuple<U...> const&>(u)) {}
template <class... U>
CUTE_HOST_DEVICE constexpr
ArithmeticTuple(tuple<U...> const& u)
: tuple<T...>(u) {}
template <class... U>
CUTE_HOST_DEVICE constexpr
ArithmeticTuple(U const&... u)
: tuple<T...>(u...) {}
};
template <class... T>
struct is_tuple<ArithmeticTuple<T...>> : true_type {};
template <class... Ts>
struct is_flat<ArithmeticTuple<Ts...>> : is_flat<tuple<Ts...>> {};
template <class... T>
CUTE_HOST_DEVICE constexpr
auto
make_arithmetic_tuple(T const&... t) {
return ArithmeticTuple<T...>(t...);
}
template <class T>
CUTE_HOST_DEVICE constexpr
auto
as_arithmetic_tuple(T const& t) {
if constexpr (is_tuple<T>::value) {
return detail::tapply(t, [](auto const& x){ return as_arithmetic_tuple(x); },
[](auto const&... a){ return make_arithmetic_tuple(a...); },
tuple_seq<T>{});
} else {
return t;
}
}
//
// Numeric operators
//
// Addition
template <class... T, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator+(ArithmeticTuple<T...> const& t, ArithmeticTuple<U...> const& u) {
constexpr int R = cute::max(int(sizeof...(T)), int(sizeof...(U)));
return transform_apply(append<R>(t,Int<0>{}), append<R>(u,Int<0>{}), plus{}, [](auto const&... a){ return make_arithmetic_tuple(a...); });
}
template <class... T, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator+(ArithmeticTuple<T...> const& t, tuple<U...> const& u) {
return t + ArithmeticTuple<U...>(u);
}
template <class... T, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator+(tuple<T...> const& t, ArithmeticTuple<U...> const& u) {
return ArithmeticTuple<T...>(t) + u;
}
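// For example (illustrative): the shorter operand is padded with Int<0> up to the
// larger rank before the element-wise sum, so
//   make_arithmetic_tuple(3, 4) + make_arithmetic_tuple(10)          // == (13, 4)
//   make_arithmetic_tuple(_2{}) + make_arithmetic_tuple(_1{}, _5{})  // == (_3, _5)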
// Subtraction
template <class... T, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator-(ArithmeticTuple<T...> const& t, ArithmeticTuple<U...> const& u) {
constexpr int R = cute::max(int(sizeof...(T)), int(sizeof...(U)));
return transform_apply(append<R>(t,Int<0>{}), append<R>(u,Int<0>{}), minus{}, [](auto const&... a){ return make_arithmetic_tuple(a...); });
}
template <class... T, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator-(ArithmeticTuple<T...> const& t, tuple<U...> const& u) {
return t - ArithmeticTuple<U...>(u);
}
template <class... T, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator-(tuple<T...> const& t, ArithmeticTuple<U...> const& u) {
return ArithmeticTuple<T...>(t) - u;
}
// Negation
template <class... T>
CUTE_HOST_DEVICE constexpr
auto
operator-(ArithmeticTuple<T...> const& t) {
return transform_apply(t, negate{}, [](auto const&... a){ return make_arithmetic_tuple(a...); });
}
//
// Special cases
//
template <auto t, class... U>
CUTE_HOST_DEVICE constexpr
ArithmeticTuple<U...> const&
operator+(C<t>, ArithmeticTuple<U...> const& u) {
static_assert(t == 0, "Arithmetic tuple op+ error!");
return u;
}
template <class... T, auto u>
CUTE_HOST_DEVICE constexpr
ArithmeticTuple<T...> const&
operator+(ArithmeticTuple<T...> const& t, C<u>) {
static_assert(u == 0, "Arithmetic tuple op+ error!");
return t;
}
template <auto t, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator-(C<t>, ArithmeticTuple<U...> const& u) {
static_assert(t == 0, "Arithmetic tuple op- error!");
return -u;
}
template <class... T, auto u>
CUTE_HOST_DEVICE constexpr
ArithmeticTuple<T...> const&
operator-(ArithmeticTuple<T...> const& t, C<u>) {
static_assert(u == 0, "Arithmetic tuple op- error!");
return t;
}
//
// ArithmeticTupleIterator
//
template <class ArithTuple>
struct ArithmeticTupleIterator
{
using value_type = ArithTuple;
using element_type = ArithTuple;
using reference = ArithTuple;
ArithTuple coord_;
CUTE_HOST_DEVICE constexpr
ArithmeticTupleIterator(ArithTuple const& coord = {}) : coord_(coord) {}
CUTE_HOST_DEVICE constexpr
ArithTuple const& operator*() const { return coord_; }
template <class Coord>
CUTE_HOST_DEVICE constexpr
auto operator[](Coord const& c) const { return *(*this + c); }
template <class Coord>
CUTE_HOST_DEVICE constexpr
auto operator+(Coord const& c) const {
return ArithmeticTupleIterator<decltype(coord_ + c)>(coord_ + c);
}
};
template <class Tuple>
CUTE_HOST_DEVICE constexpr
auto
make_inttuple_iter(Tuple const& t) {
return ArithmeticTupleIterator(as_arithmetic_tuple(t));
}
template <class T0, class T1, class... Ts>
CUTE_HOST_DEVICE constexpr
auto
make_inttuple_iter(T0 const& t0, T1 const& t1, Ts const&... ts) {
return make_inttuple_iter(cute::make_tuple(t0, t1, ts...));
}
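// For example (illustrative):
//   auto it = make_inttuple_iter(2, 5);          // iterator at coordinate (2,5)
//   auto jt = it + make_arithmetic_tuple(1, 0);  // iterator at coordinate (3,5)
//   auto c  = *jt;                               // the ArithmeticTuple (3,5)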
//
// ArithmeticTuple "basis" elements
// A ScaledBasis<T,N> is an (at least) rank-(N+1) ArithmeticTuple:
//   (_0,_0,...,T,_0,...)
// with value T in the Nth mode.
template <class T, int N>
struct ScaledBasis : private tuple<T>
{
CUTE_HOST_DEVICE constexpr
ScaledBasis(T const& t = {}) : tuple<T>(t) {}
CUTE_HOST_DEVICE constexpr
decltype(auto) value() { return get<0>(static_cast<tuple<T> &>(*this)); }
CUTE_HOST_DEVICE constexpr
decltype(auto) value() const { return get<0>(static_cast<tuple<T> const&>(*this)); }
CUTE_HOST_DEVICE static constexpr
auto mode() { return Int<N>{}; }
};
template <class T>
struct is_scaled_basis : false_type {};
template <class T, int N>
struct is_scaled_basis<ScaledBasis<T,N>> : true_type {};
template <class T, int N>
struct is_integral<ScaledBasis<T,N>> : true_type {};
// Get the scalar T out of a ScaledBasis
template <class SB>
CUTE_HOST_DEVICE constexpr auto
basis_value(SB const& e)
{
if constexpr (is_scaled_basis<SB>::value) {
return basis_value(e.value());
} else {
return e;
}
CUTE_GCC_UNREACHABLE;
}
// Apply the N... pack to another Tuple
template <class SB, class Tuple>
CUTE_HOST_DEVICE constexpr auto
basis_get(SB const& e, Tuple const& t)
{
if constexpr (is_scaled_basis<SB>::value) {
return basis_get(e.value(), get<SB::mode()>(t));
} else {
return t;
}
CUTE_GCC_UNREACHABLE;
}
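// Example (illustrative): basis_value() strips the ScaledBasis wrapper(s) and
// returns the innermost scalar, while basis_get() uses the mode(s) of the
// basis element to index into another tuple. With arbitrary a, b, c:
//
//   basis_value(ScaledBasis<Int<5>,1>{})                          // -> Int<5>
//   basis_get(ScaledBasis<Int<1>,1>{}, cute::make_tuple(a, b, c)) // -> b  (mode-1 element)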
namespace detail {
template <class T, int... I>
CUTE_HOST_DEVICE constexpr
auto
to_atuple_i(T const& t, seq<I...>) {
return make_arithmetic_tuple((void(I),Int<0>{})..., t);
}
} // end namespace detail
// Turn a ScaledBasis<T,N> into a rank-(N+1) ArithmeticTuple
// with N prefix 0s: (_0,_0,...N...,_0,T)
template <class T, int N>
CUTE_HOST_DEVICE constexpr
auto
as_arithmetic_tuple(ScaledBasis<T,N> const& t) {
return detail::to_atuple_i(as_arithmetic_tuple(t.value()), make_seq<N>{});
}
namespace detail {
template <int... Ns>
struct Basis;
template <>
struct Basis<> {
using type = Int<1>;
};
template <int N, int... Ns>
struct Basis<N,Ns...> {
using type = ScaledBasis<typename Basis<Ns...>::type, N>;
};
} // end namespace detail
// Shortcut for writing ScaledBasis<ScaledBasis<ScaledBasis<Int<1>, N0>, N1>, ...>
// E<> := _1
// E<0> := (_1,_0,_0,...)
// E<1> := (_0,_1,_0,...)
// E<0,0> := ((_1,_0,_0,...),_0,_0,...)
// E<0,1> := ((_0,_1,_0,...),_0,_0,...)
// E<1,0> := (_0,(_1,_0,_0,...),_0,...)
// E<1,1> := (_0,(_0,_1,_0,...),_0,...)
template <int... N>
using E = typename detail::Basis<N...>::type;
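// Example (illustrative): basis elements add and scale like sparse unit
// vectors and convert to ArithmeticTuples on demand:
//
//   as_arithmetic_tuple(E<1>{})   // -> (_0,_1)
//   E<0>{} + E<1>{}               // -> (_1,_1)
//   E<1>{} * 64                   // -> ScaledBasis<int,1>, printed as "64@1"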
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
make_basis_like(Shape const& shape)
{
if constexpr (is_integral<Shape>::value) {
return Int<1>{};
} else {
// Generate bases for each rank of shape
return transform(tuple_seq<Shape>{}, shape, [](auto I, auto si) {
// Generate bases for each rank of si and add an i on front
using I_type = decltype(I);
return transform_leaf(make_basis_like(si), [](auto e) {
// MSVC has trouble capturing variables as constexpr,
// so that they can be used as template arguments.
// This is exactly what the code needs to do with i, unfortunately.
// The work-around is to define i inside the inner lambda,
// by using just the type from the enclosing scope.
constexpr int i = I_type::value;
return ScaledBasis<decltype(e), i>{};
});
});
}
CUTE_GCC_UNREACHABLE;
}
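// Example (illustrative): make_basis_like() produces one basis element per
// leaf of the shape, tagged with the (possibly hierarchical) mode it occupies:
//
//   make_basis_like(Int<8>{})                        // -> _1
//   make_basis_like(make_shape(4, 8))                // -> (E<0>, E<1>)
//   make_basis_like(make_shape(4, make_shape(2,2)))  // -> (E<0>, (E<1,0>, E<1,1>))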
//
// Arithmetic
//
template <class T, int M, class U>
CUTE_HOST_DEVICE constexpr
auto
safe_div(ScaledBasis<T,M> const& b, U const& u)
{
auto t = safe_div(b.value(), u);
return ScaledBasis<decltype(t),M>{t};
}
template <class T, int M, class U>
CUTE_HOST_DEVICE constexpr
auto
shape_div(ScaledBasis<T,M> const& b, U const& u)
{
auto t = shape_div(b.value(), u);
return ScaledBasis<decltype(t),M>{t};
}
// Equality
template <class T, int N, class U, int M>
CUTE_HOST_DEVICE constexpr
auto
operator==(ScaledBasis<T,N> const& t, ScaledBasis<U,M> const& u) {
return bool_constant<M == N>{} && t.value() == u.value();
}
// Not equal to anything else
template <class T, int N, class U>
CUTE_HOST_DEVICE constexpr
false_type
operator==(ScaledBasis<T,N> const&, U const&) {
return {};
}
template <class T, class U, int M>
CUTE_HOST_DEVICE constexpr
false_type
operator==(T const&, ScaledBasis<U,M> const&) {
return {};
}
// Abs
template <class T, int N>
CUTE_HOST_DEVICE constexpr
auto
abs(ScaledBasis<T,N> const& e) {
return ScaledBasis<decltype(abs(e.value())),N>{abs(e.value())};
}
// Multiplication
template <class A, class T, int N>
CUTE_HOST_DEVICE constexpr
auto
operator*(A const& a, ScaledBasis<T,N> const& e) {
auto r = a * e.value();
return ScaledBasis<decltype(r),N>{r};
}
template <class T, int N, class B>
CUTE_HOST_DEVICE constexpr
auto
operator*(ScaledBasis<T,N> const& e, B const& b) {
auto r = e.value() * b;
return ScaledBasis<decltype(r),N>{r};
}
// Addition
template <class T, int N, class U, int M>
CUTE_HOST_DEVICE constexpr
auto
operator+(ScaledBasis<T,N> const& t, ScaledBasis<U,M> const& u) {
return as_arithmetic_tuple(t) + as_arithmetic_tuple(u);
}
template <class T, int N, class... U>
CUTE_HOST_DEVICE constexpr
auto
operator+(ScaledBasis<T,N> const& t, ArithmeticTuple<U...> const& u) {
return as_arithmetic_tuple(t) + u;
}
template <class... T, class U, int M>
CUTE_HOST_DEVICE constexpr
auto
operator+(ArithmeticTuple<T...> const& t, ScaledBasis<U,M> const& u) {
return t + as_arithmetic_tuple(u);
}
template <auto t, class U, int M>
CUTE_HOST_DEVICE constexpr
auto
operator+(C<t>, ScaledBasis<U,M> const& u) {
static_assert(t == 0, "ScaledBasis op+ error!");
return u;
}
template <class T, int N, auto u>
CUTE_HOST_DEVICE constexpr
auto
operator+(ScaledBasis<T,N> const& t, C<u>) {
static_assert(u == 0, "ScaledBasis op+ error!");
return t;
}
//
// Display utilities
//
template <class ArithTuple>
CUTE_HOST_DEVICE void print(ArithmeticTupleIterator<ArithTuple> const& iter)
{
printf("ArithTuple"); print(iter.coord_);
}
template <class T, int N>
CUTE_HOST_DEVICE void print(ScaledBasis<T,N> const& e)
{
print(e.value()); printf("@%d", N);
}
#if !defined(__CUDACC_RTC__)
template <class ArithTuple>
CUTE_HOST std::ostream& operator<<(std::ostream& os, ArithmeticTupleIterator<ArithTuple> const& iter)
{
return os << "ArithTuple" << iter.coord_;
}
template <class T, int N>
CUTE_HOST std::ostream& operator<<(std::ostream& os, ScaledBasis<T,N> const& e)
{
return os << e.value() << "@" << N;
}
#endif
} // end namespace cute
namespace CUTE_STL_NAMESPACE
{
template <class... T>
struct tuple_size<cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)>
{};
template <size_t I, class... T>
struct tuple_element<I, cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::tuple_element<I, CUTE_STL_NAMESPACE::tuple<T...>>
{};
template <class... T>
struct tuple_size<const cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)>
{};
template <size_t I, class... T>
struct tuple_element<I, const cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::tuple_element<I, const CUTE_STL_NAMESPACE::tuple<T...>>
{};
} // end namespace CUTE_STL_NAMESPACE
#ifdef CUTE_STL_NAMESPACE_IS_CUDA_STD
namespace std
{
#if defined(__CUDACC_RTC__)
template <class... _Tp>
struct tuple_size;
template <size_t _Ip, class... _Tp>
struct tuple_element;
#endif
template <class... T>
struct tuple_size<cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)>
{};
template <size_t I, class... T>
struct tuple_element<I, cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::tuple_element<I, CUTE_STL_NAMESPACE::tuple<T...>>
{};
template <class... T>
struct tuple_size<const cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)>
{};
template <size_t I, class... T>
struct tuple_element<I, const cute::ArithmeticTuple<T...>>
: CUTE_STL_NAMESPACE::tuple_element<I, const CUTE_STL_NAMESPACE::tuple<T...>>
{};
} // end namespace std
#endif // CUTE_STL_NAMESPACE_IS_CUDA_STD
// ---- end of file: include/cute/numeric/arithmetic_tuple.hpp ----
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#include <cuda_fp16.h>
#include "cutlass/arch/mma.h"
#include "cutlass/layout/matrix.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <typename LayoutA, typename LayoutB, typename LayoutC>
struct Mma<
gemm::GemmShape<2,1,1>,
1,
half_t,
LayoutA,
half_t,
LayoutB,
half_t,
LayoutC,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<2, 1, 1>;
using Operator = OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 2> &d,
Array<half_t, 2> const &a,
Array<half_t, 1> const &b,
Array<half_t, 2> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 const & A = reinterpret_cast<__half2 const &>(a);
__half2 B = __half2half2(reinterpret_cast<__half const &>(b));
__half2 const & C = reinterpret_cast<__half2 const &>(c);
__half2 D = __hfma2(A, B, C);
d = reinterpret_cast<Array<half_t, 2> &>(D);
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
d[i] = a[i] * b[0] + c[i];
}
#endif
}
};
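// Example (illustrative sketch; the alias and variable names below are
// assumptions, not part of this header): the 2x1x1 HFMA2 operator computes
// d[i] = a[i] * b[0] + c[i] for i in {0,1}.
//
//   using Mma2x1x1 = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<2,1,1>, 1,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   cutlass::Array<cutlass::half_t, 2> d, a, c;
//   cutlass::Array<cutlass::half_t, 1> b;
//   Mma2x1x1{}(d, a, b, c);   // uses __hfma2 on SM60+, scalar FMA otherwise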
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <typename LayoutA, typename LayoutB>
struct Mma<
gemm::GemmShape<1,2,1>,
1,
half_t,
LayoutA,
half_t,
LayoutB,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<1, 2, 1>;
using Operator = OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 2> &d,
Array<half_t, 1> const &a,
Array<half_t, 2> const &b,
Array<half_t, 2> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 const & A = __half2half2(reinterpret_cast<__half const &>(a));
__half2 B = reinterpret_cast<__half2 const &>(b);
__half2 const & C = reinterpret_cast<__half2 const &>(c);
__half2 D = __hfma2(A, B, C);
d = reinterpret_cast<Array<half_t, 2> &>(D);
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
d[i] = a[0] * b[i] + c[i];
}
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <>
struct Mma <
gemm::GemmShape<2, 2, 1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<2, 2, 1>;
using Operator = OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 4> &d,
Array<half_t, 2> const &a,
Array<half_t, 2> const &b,
Array<half_t, 4> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 const & A = reinterpret_cast<__half2 const &>(a);
__half2 Blo = __low2half2(reinterpret_cast<__half2 const &>(b));
__half2 Bhi = __high2half2(reinterpret_cast<__half2 const &>(b));
__half2 const *C = reinterpret_cast<__half2 const *>(&c);
__half2 Dlo = __hfma2(A, Blo, C[0]);
__half2 Dhi = __hfma2(A, Bhi, C[1]);
Array<half_t, 2> * D = reinterpret_cast<Array<half_t, 2> *>(&d);
D[0] = reinterpret_cast<Array<half_t, 2> const &>(Dlo);
D[1] = reinterpret_cast<Array<half_t, 2> const &>(Dhi);
#else
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < 2; ++j) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
d[i + 2 * j] = a[i] * b[j] + c[i + 2 * j];
}
}
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <>
struct Mma<
gemm::GemmShape<2, 2, 1>,
1,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd> {
using Shape = gemm::GemmShape<2, 2, 1>;
using Operator = OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 4> &d,
Array<half_t, 2> const &a,
Array<half_t, 2> const &b,
Array<half_t, 4> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 Alo = __low2half2(reinterpret_cast<__half2 const &>(a));
__half2 Ahi = __high2half2(reinterpret_cast<__half2 const &>(a));
__half2 const & B = reinterpret_cast<__half2 const &>(b);
__half2 const *C = reinterpret_cast<__half2 const *>(&c);
__half2 Dlo = __hfma2(Alo, B, C[0]);
__half2 Dhi = __hfma2(Ahi, B, C[1]);  // second row accumulates against c[2], c[3]
Array<half_t, 2> * D = reinterpret_cast<Array<half_t, 2> *>(&d);
D[0] = reinterpret_cast<Array<half_t, 2> &>(Dlo);
D[1] = reinterpret_cast<Array<half_t, 2> &>(Dhi);
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < 2; ++j) {
d[i * 2 + j] = a[i] * b[j] + c[i * 2 + j];
}
}
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
// ---- end of file: include/cutlass/arch/mma_sm60.h ----
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// WMMA template structure defines nvcuda::wmma::fragments and static assert for
// wmma native instruction sizes supported for cutlass::int4b_t (experimental::s4).
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
cutlass::int4b_t, ///< ElementA
LayoutA_, ///< LayoutA
cutlass::int4b_t, ///< ElementB
LayoutB_, ///< LayoutB
int32_t, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM75_ENABLED)
using Shape = Shape_;
using ElementA = cutlass::int4b_t;
using LayoutA = LayoutA_;
using ElementB = cutlass::int4b_t;
using LayoutB = LayoutB_;
using ElementC = int32_t;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpMultiplyAdd;
using ArchTag = arch::Sm75;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<8, 8, 32>, Shape>::value,
"Supported list of wmma operator shape for s8 multiplicands is: 8x8x32");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::mma_sync(D, A, B, C);
}
#else
static_assert(false, "wmma.mma.sync for integer-type multiplicands is available only on SM75 and beyond");
#endif
};
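// Example (illustrative sketch; the alias name and layouts are assumptions):
// the s4 specialization above is normally consumed by warp-level MMA
// templates, but its fragments can also be driven with the nvcuda::wmma API:
//
//   using WmmaS4 = cutlass::arch::Wmma<
//       cutlass::gemm::GemmShape<8, 8, 32>,
//       cutlass::int4b_t, cutlass::layout::RowMajor,     // A
//       cutlass::int4b_t, cutlass::layout::ColumnMajor,  // B
//       int32_t,          cutlass::layout::RowMajor,     // C
//       cutlass::arch::OpMultiplyAdd>;
//
//   // Fill WmmaS4::FragmentA/B/C via nvcuda::wmma::load_matrix_sync /
//   // fill_fragment in device code, then call WmmaS4{}(D, A, B, C).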
////////////////////////////////////////////////////////////////////////////////
//
// WMMA template structure defines nvcuda::wmma::fragments and static assert for
// wmma native instruction sizes supported for cutlass::uint1b_t (experimental::b1).
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
cutlass::uint1b_t, ///< ElementA
LayoutA_, ///< LayoutA
cutlass::uint1b_t, ///< ElementB
LayoutB_, ///< LayoutB
int32_t, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpXorPopc ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM75_ENABLED)
using Shape = Shape_;
using ElementA = cutlass::uint1b_t;
using LayoutA = LayoutA_;
using ElementB = cutlass::uint1b_t;
using LayoutB = LayoutB_;
using ElementC = int32_t;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpXorPopc;
using ArchTag = arch::Sm75;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<8, 8, 128>, Shape>::value,
"Supported list of wmma operator shape for b1 multiplicands is: 8x8x128");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::bmma_sync(D, A, B, C, nvcuda::wmma::experimental::bmmaBitOpXOR,
nvcuda::wmma::experimental::bmmaAccumulateOpPOPC);
}
#else
static_assert(false, "wmma.mma.sync for binary (b1) multiplicands is available only on SM75 and beyond");
#endif
};
} // namespace arch
} // namespace cutlass
| include/cutlass/arch/wmma_sm75.h/0 | {
"file_path": "include/cutlass/arch/wmma_sm75.h",
"repo_id": "include",
"token_count": 3043
} | 15 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/conv/convnd_problem_shape.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::conv::collective::detail {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Construct the stride types for conv collectives based on the dispatch policy; strides are 64b by default
template <class DispatchPolicy>
constexpr auto
sm90_dispatch_policy_to_stride_A() {
if constexpr (DispatchPolicy::ConvOp == conv::Operator::kFprop) {
// Maps to modes ((w,n), C)
if constexpr (DispatchPolicy::NumSpatialDimensions == 1) {
return cute::Stride<cute::Stride<int64_t, int64_t>,
cute::Int<1>>{};
}
// Maps to modes ((w,h,n), C)
else if constexpr (DispatchPolicy::NumSpatialDimensions == 2) {
return cute::Stride<cute::Stride<int64_t, int64_t, int64_t>,
cute::Int<1>>{};
}
// Maps to modes ((w,h,d,n), C)
else if constexpr (DispatchPolicy::NumSpatialDimensions == 3) {
return cute::Stride<cute::Stride<int64_t, int64_t, int64_t, int64_t>,
cute::Int<1>>{};
}
// error dims assert
else {
static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported spatial dim count.");
}
}
else if constexpr (DispatchPolicy::ConvOp == conv::Operator::kWgrad) {
// Maps to modes (k, nq/npq/nzpq)
if constexpr (DispatchPolicy::NumSpatialDimensions == 1 ||
DispatchPolicy::NumSpatialDimensions == 2 ||
DispatchPolicy::NumSpatialDimensions == 3) {
return cute::Stride<cute::Int<1>, int64_t>{};
}
// error dims assert
else {
static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported spatial dim count.");
}
}
else if constexpr (DispatchPolicy::ConvOp == conv::Operator::kDgrad) {
// Maps to modes ((q,n), K)
if constexpr (DispatchPolicy::NumSpatialDimensions == 1) {
return cute::Stride<cute::Stride<int64_t, int64_t>,
cute::Int<1>>{};
}
// Maps to modes ((q,p,n), K)
else if constexpr (DispatchPolicy::NumSpatialDimensions == 2) {
return cute::Stride<cute::Stride<int64_t, int64_t, int64_t>,
cute::Int<1>>{};
}
// Maps to modes ((q,p,z,n), K)
else if constexpr (DispatchPolicy::NumSpatialDimensions == 3) {
return cute::Stride<cute::Stride<int64_t, int64_t, int64_t, int64_t>,
cute::Int<1>>{};
}
// error dims assert
else {
static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported spatial dim count.");
}
}
else {
static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported ConvOp.");
}
}
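// Example (illustrative): for a hypothetical dispatch policy with
// ConvOp == conv::Operator::kFprop and NumSpatialDimensions == 2,
// sm90_dispatch_policy_to_stride_A<Policy>() yields
//   cute::Stride<cute::Stride<int64_t, int64_t, int64_t>, cute::Int<1>>
// i.e. dynamic 64-bit strides for the (w,h,n) modes and a compile-time unit
// stride for the contiguous channel mode.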
// Construct the stride types for conv collectives based on the dispatch policy; strides are 64b by default
template <class DispatchPolicy>
constexpr auto
sm90_dispatch_policy_to_stride_B() {
if constexpr (DispatchPolicy::ConvOp == conv::Operator::kFprop) {
// Maps to modes (k, (C,s))
if constexpr (DispatchPolicy::NumSpatialDimensions == 1) {
return cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t>>{};
}
// Maps to modes (k, (C,s,r))
else if constexpr (DispatchPolicy::NumSpatialDimensions == 2) {
return cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t, int64_t>>{};
}
// Maps to modes (k, (C,s,r,t))
else if constexpr (DispatchPolicy::NumSpatialDimensions == 3) {
return cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t, int64_t, int64_t>>{};
}
// error dims assert
else {
static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported spatial dim count.");
}
}
else if constexpr (DispatchPolicy::ConvOp == conv::Operator::kWgrad) {
// Maps to modes (C, (w,n))
if constexpr (DispatchPolicy::NumSpatialDimensions == 1) {
return cute::Stride<cute::Int<1>,
cute::Stride<int64_t, int64_t>>{};
}
// Maps to modes (C, (w,h,n))
else if constexpr (DispatchPolicy::NumSpatialDimensions == 2) {
return cute::Stride<cute::Int<1>,
cute::Stride<int64_t, int64_t, int64_t>>{};
}
// Maps to modes (C, (w,h,d,n))
else if constexpr (DispatchPolicy::NumSpatialDimensions == 3) {
return cute::Stride<cute::Int<1>,
cute::Stride<int64_t, int64_t, int64_t, int64_t>>{};
}
// error dims assert
else {
static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported spatial dim count.");
}
}
else if constexpr (DispatchPolicy::ConvOp == conv::Operator::kDgrad) {
// Maps to modes (C, (k,s))
if constexpr (DispatchPolicy::NumSpatialDimensions == 1) {
return cute::Stride<cute::Int<1>, cute::Stride<int64_t, int64_t>>{};
}
// Maps to modes (C, (k,s,r))
else if constexpr (DispatchPolicy::NumSpatialDimensions == 2) {
return cute::Stride<cute::Int<1>, cute::Stride<int64_t, int64_t, int64_t>>{};
}
// Maps to modes (C, (k,s,r,t))
else if constexpr (DispatchPolicy::NumSpatialDimensions == 3) {
return cute::Stride<cute::Int<1>, cute::Stride<int64_t, int64_t, int64_t, int64_t>>{};
}
// error dims assert
else {
static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported spatial dim count.");
}
}
else {
static_assert(cutlass::detail::dependent_false<DispatchPolicy>, "Unsupported ConvOp.");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Compute the lower/near corner, returning it as a cute::array in [W,H,D] order
template <conv::Operator ConvOp, int NumSpatialDimensions>
CUTLASS_HOST_DEVICE
constexpr auto
compute_lower_corner_whd(ConvProblemShape<ConvOp, NumSpatialDimensions> const& problem_shape) {
using cute::for_each;
using cute::make_seq;
cute::array<int, NumSpatialDimensions> lower{};
if constexpr (ConvOp == conv::Operator::kFprop ||
ConvOp == conv::Operator::kWgrad) {
for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) {
lower[NumSpatialDimensions-1-i] = -1 * problem_shape.lower_padding[i];
});
}
else if constexpr (ConvOp == conv::Operator::kDgrad) {
for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) {
lower[NumSpatialDimensions-1-i] = problem_shape.lower_padding[i] -
(problem_shape.shape_B[i+1] - 1) * problem_shape.dilation[i];
});
}
return lower;
}
// Computes the upper/far corner, returning it as a cute::array in [W,H,D] order
template <conv::Operator ConvOp, int NumSpatialDimensions>
CUTLASS_HOST_DEVICE
constexpr auto
compute_upper_corner_whd(ConvProblemShape<ConvOp, NumSpatialDimensions> const& problem_shape) {
using cute::for_each;
using cute::make_seq;
cute::array<int, NumSpatialDimensions> upper{};
if constexpr (ConvOp == conv::Operator::kFprop) {
for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) {
upper[NumSpatialDimensions-1-i] = problem_shape.upper_padding[i] -
(problem_shape.shape_B[i+1] - 1) * problem_shape.dilation[i];
});
}
else if constexpr (ConvOp == conv::Operator::kWgrad) {
for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) {
upper[NumSpatialDimensions-1-i] = problem_shape.upper_padding[i] -
(problem_shape.shape_C[i+1] - 1) * problem_shape.dilation[i];
});
}
else if constexpr (ConvOp == conv::Operator::kDgrad) {
for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) {
upper[NumSpatialDimensions-1-i] = problem_shape.lower_padding[i] -
(problem_shape.shape_B[i+1] - 1) * problem_shape.dilation[i] + problem_shape.shape_C[i+1] - problem_shape.shape_A[i+1];
});
}
return upper;
}
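// Example (illustrative): for a 2-D Fprop problem with a 3x3 filter
// (shape_B = (K,3,3,C)), lower_padding = upper_padding = {1,1} and
// dilation = {1,1}, compute_lower_corner_whd() returns {-1,-1} and
// compute_upper_corner_whd() returns {1 - (3-1)*1, 1 - (3-1)*1} = {-1,-1},
// both expressed in [W,H] order.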
// Compute the lower/near corner of (t,r,s), returning it as a cute::array in [S,R,T] order
template <conv::Operator ConvOp, int NumSpatialDimensions>
CUTLASS_HOST_DEVICE
constexpr auto
compute_lower_srt(ConvProblemShape<ConvOp, NumSpatialDimensions> const& problem_shape) {
using cute::for_each;
using cute::make_seq;
cute::array<int, NumSpatialDimensions> lower{};
if constexpr (ConvOp == conv::Operator::kFprop ||
ConvOp == conv::Operator::kWgrad) {
for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) {
lower[NumSpatialDimensions-1-i] = 0;
});
}
else if constexpr (ConvOp == conv::Operator::kDgrad) {
for_each(make_seq<NumSpatialDimensions>{}, [&](auto i) {
lower[NumSpatialDimensions-1-i] = (problem_shape.shape_B[i+1] - 1) * problem_shape.dilation[i];
});
}
return lower;
}
template <class CopyOp> struct is_im2col_load { static constexpr bool value = false; };
template <> struct is_im2col_load<SM90_TMA_LOAD_IM2COL > { static constexpr bool value = true; };
template <> struct is_im2col_load<SM90_TMA_LOAD_IM2COL_MULTICAST> { static constexpr bool value = true; };
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::conv::collective::detail
| include/cutlass/conv/collective/detail.hpp/0 | {
"file_path": "include/cutlass/conv/collective/detail.hpp",
"repo_id": "include",
"token_count": 4142
} | 16 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a multi-staged Depthwise Convolution kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/semaphore.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/epilogue/threadblock/output_iterator_parameter.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure
template <typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad)
typename Arguments_, ///! Kernel Arguments
typename ConvOutputIteratorParameter_, ///! Output Iterator Params
typename ConvProblemSize_ = Conv2dProblemSize, ///! Convolutional operator on 2D or 3D problem
conv::GroupMode GroupMode_ = conv::GroupMode::kNone, ///! Group mode
typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1> > ///! OutputShape per ThreadBlock
struct DirectConvolutionParams {
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
static Operator const kConvolutionalOperator = ConvOperator;
using ConvProblemSize = ConvProblemSize_;
using Arguments = Arguments_;
using ConvOutputIteratorParameter = ConvOutputIteratorParameter_;
using ThreadblockShape = typename Mma::Shape;
static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm;
static conv::GroupMode const kGroupMode = GroupMode_;
static int const kStages = Mma::kStages;
ConvProblemSize problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
gemm::GemmCoord implicit_gemm_problem_size;
int swizzle_log_tile;
int smem_size_;
int gemm_k_iterations;
int gemm_k_iterations_per_channel;
typename Mma::IteratorA::Params iterator_A;
typename Mma::IteratorA::Element const *ptr_A;
typename Mma::IteratorB::Params iterator_B;
typename Mma::IteratorB::Element const *ptr_B;
typename Mma::IteratorB::Element *ptr_reordered_B;
typename Epilogue::OutputTileIterator::Params iterator_C;
typename Epilogue::OutputTileIterator::Element *ptr_C;
typename Epilogue::OutputTileIterator::Params iterator_D;
typename Epilogue::OutputTileIterator::Element *ptr_D;
typename EpilogueOutputOp::Params output_op;
int *semaphore;
SplitKMode split_k_mode;
int split_k_slices;
//
// Methods
//
CUTLASS_HOST_DEVICE
DirectConvolutionParams() : swizzle_log_tile(0), gemm_k_iterations(0) {}
///
CUTLASS_HOST_DEVICE
DirectConvolutionParams(Arguments const &args, int *semaphore = nullptr)
: problem_size(args.problem_size),
implicit_gemm_problem_size(
cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size)),
iterator_A(Mma::IteratorA::getParams(args.problem_size, args.ref_A.layout())),
ptr_A(args.ref_A.data()),
iterator_B(Mma::IteratorB::getParams(args.problem_size, args.ref_B.layout())),
ptr_B(args.ref_B.data()),
ptr_reordered_B(args.ref_reordered_B.data()),
iterator_C(ConvOutputIteratorParameter::layout(args.ref_C), args.problem_size),
ptr_C(args.ref_C.data()),
iterator_D(ConvOutputIteratorParameter::layout(args.ref_D), args.problem_size),
ptr_D(args.ref_D.data()),
output_op(args.output_op),
semaphore(semaphore),
split_k_mode(args.split_k_mode),
split_k_slices(args.problem_size.split_k_slices) {
gemm_k_iterations =
depthwise_gemm_k_iterations<ThreadBlockOutputShape::kN,
ThreadBlockOutputShape::kH,
ThreadBlockOutputShape::kW>(kConvolutionalOperator,
ThreadblockShape::kK,
args.problem_size,
kIteratorAlgorithm,
kGroupMode,
ThreadblockShape::kN);
gemm_k_iterations_per_channel = implicit_gemm_k_iterations_per_channel(
kConvolutionalOperator, args.problem_size, kIteratorAlgorithm);
ThreadblockSwizzle threadblock_swizzle;
grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
kConvolutionalOperator,
problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices);
swizzle_log_tile = threadblock_swizzle.get_log_tile(grid_tiled_shape);
// Dynamic SMEM usage because stride and dilation are runtime params.
smem_size_ = (max(iterator_A.activation_size, int(sizeof(typename Epilogue::SharedStorage))) * kStages + iterator_B.filter_size);
}
CUTLASS_HOST_DEVICE
int get_smem_size() {
// Dynamic Smem Size
return smem_size_;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Params_, typename ElementB_>
struct ReorderKernel {
using Params = Params_;
using ElementB = ElementB_;
union SharedStorage {};
static unsigned int const kReorderKernelThreadPerCTA = 128;
CUTLASS_HOST_DEVICE
ReorderKernel() {}
CUTLASS_HOST_DEVICE
static dim3 get_grid_shape(Params const ¶ms) {
return dim3{static_cast<unsigned int>(
(params.problem_size.filter_size() + kReorderKernelThreadPerCTA - 1) /
kReorderKernelThreadPerCTA),
1,
1};
}
CUTLASS_HOST_DEVICE
static dim3 get_block_shape() { return dim3{kReorderKernelThreadPerCTA, 1, 1}; }
CUTLASS_HOST_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
int64_t m = static_cast<int64_t>(params.problem_size.groups);
int64_t n = static_cast<int64_t>(params.problem_size.filter_size() / params.problem_size.K);
const ElementB *src_with_type = static_cast<const ElementB *>(params.ptr_B);
ElementB *dst_with_type = static_cast<ElementB *>(params.ptr_reordered_B);
int64_t linear_index = blockIdx.x * kReorderKernelThreadPerCTA + threadIdx.x;
int64_t index_m = linear_index / n;
int64_t index_n = linear_index % n;
int64_t new_linear_index = index_m + index_n * m;
if (linear_index < m * n) {
dst_with_type[new_linear_index] = src_with_type[linear_index];
}
return;
}
};
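// Note (illustrative): ReorderKernel transposes the filter viewed as an
// m x n row-major matrix, with m = groups and n = filter_size() / K, into
// column-major order so the depthwise mainloop can read it contiguously.
// E.g. with m = 2, n = 3, source element 4 (row 1, col 1) lands at
// destination index 1 + 1 * 2 = 3.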
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad)
typename ConvProblemSize_ = Conv2dProblemSize, ///! Convolutional operator on 2D or 3D problem
conv::GroupMode GroupMode_ = conv::GroupMode::kNone, ///! Group mode
typename ThreadBlockOutputShape_ = cutlass::conv::TensorNHWCShape<1, 1, 1, 1>
>
struct DirectConvolution {
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
static Operator const kConvolutionalOperator = ConvOperator;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename EpilogueOutputOp::ElementOutput;
/// Set output tensor C layout
using LayoutC = LayoutA;
using ElementAccumulator = typename EpilogueOutputOp::ElementAccumulator;
using ElementCompute = typename EpilogueOutputOp::ElementCompute;
using WarpMmaOperator = typename Mma::Policy::Operator;
using ArchMmaOperator = typename WarpMmaOperator::ArchMmaOperator;
using MathOperator = typename ArchMmaOperator::Operator;
using OperatorClass = typename WarpMmaOperator::OperatorClass;
using ArchTag = typename WarpMmaOperator::ArchTag;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename WarpMmaOperator::Shape;
using InstructionShape = typename cutlass::gemm::GemmShape<1, 1, 1>;
static int const kStages = Mma::kStages;
static IteratorAlgorithm const kIteratorAlgorithm = Mma::IteratorA::kIteratorAlgorithm;
static StrideSupport const kStrideSupport = Mma::IteratorA::kStrideSupport;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
using TensorRefA = typename Mma::IteratorA::TensorRef;
using TensorRefB = typename Mma::IteratorB::TensorRef;
using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>;
/// Check that iterator A and iterator B have the same convolution dimension and
/// set device::ImplicitGemmConvolution::kConvDim
static_assert(Mma::IteratorA::kConvDim == Mma::IteratorB::kConvDim,
"Convolution on different dimensions is not supported");
static int const kConvDim = Mma::IteratorA::kConvDim;
/// Conv dimension and problem size structure (Conv2d or Conv3d)
using ConvProblemSize = ConvProblemSize_;
static conv::GroupMode const kGroupMode = GroupMode_;
//
//
//
using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter<
LayoutC,
typename Epilogue::OutputTileIterator::Layout,
TensorRefC,
ConvOperator,
ConvProblemSize
>;
/// Argument structure
struct Arguments {
//
// Data members
//
ConvProblemSize problem_size;
TensorRefA ref_A;
TensorRefB ref_B;
TensorRefB ref_reordered_B;
TensorRefC ref_C;
TensorRefC ref_D;
typename EpilogueOutputOp::Params output_op;
SplitKMode split_k_mode;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments() { }
CUTLASS_HOST_DEVICE
Arguments(
ConvProblemSize const & problem_size
):
problem_size(problem_size) { }
CUTLASS_HOST_DEVICE
Arguments(
ConvProblemSize const & problem_size,
TensorRefA const & ref_A,
TensorRefB const & ref_B,
TensorRefC const & ref_C,
TensorRefC const & ref_D,
typename EpilogueOutputOp::Params const & output_op,
TensorRefB const & ref_reordered_B = nullptr,
SplitKMode const & split_k_mode = SplitKMode::kSerial
):
problem_size(problem_size),
ref_A(ref_A),
ref_B(ref_B),
ref_C(ref_C),
ref_D(ref_D),
output_op(output_op),
ref_reordered_B(ref_reordered_B),
split_k_mode(split_k_mode)
{
}
};
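// Example (illustrative sketch; variable names and the {alpha, beta}
// epilogue parameters are assumptions): host-side construction of Arguments
// for a depthwise problem whose filter has been reordered:
//
//   Arguments args(problem_size,
//                  ref_A, ref_B, ref_C, ref_D,
//                  {alpha, beta},      // EpilogueOutputOp::Params
//                  ref_reordered_B);   // split_k_mode defaults to kSerial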
using Params =
typename cutlass::conv::kernel::DirectConvolutionParams<Mma,
Epilogue,
ThreadblockSwizzle,
kConvolutionalOperator,
Arguments,
ConvOutputIteratorParameter,
ConvProblemSize,
kGroupMode,
ThreadBlockOutputShape>;
using ReorderKernel = typename cutlass::conv::kernel::ReorderKernel<Params, ElementB>;
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
DirectConvolution() { }
/// Executes one ImplicitGEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_idx =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if threadblock is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) {
return;
}
// Compute position within threadblock
int thread_idx = threadIdx.x;
int iterator_column_offset = 0;
int filter_row_offset = 0;
if (kGroupMode != GroupMode::kNone) {
if (kGroupMode == GroupMode::kDepthwise) {
iterator_column_offset += threadblock_tile_idx.n() * Mma::Shape::kN;
}
}
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.iterator_A,
params.problem_size,
params.ptr_A,
thread_idx,
MatrixCoord(
threadblock_tile_idx.m() + threadblock_tile_idx.k(),
iterator_column_offset
)
);
typename Mma::IteratorB iterator_B(
params.iterator_B,
params.problem_size,
params.ptr_reordered_B,
thread_idx,
MatrixCoord(
filter_row_offset,
iterator_column_offset
)
);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
// Compute logical position within grid
threadblock_tile_idx =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
MatrixCoord threadblock_offset(
threadblock_tile_idx.m() + threadblock_tile_idx.k(),
threadblock_tile_idx.n() * Mma::Shape::kN
);
// Tile iterator writing to destination tensor
typename Epilogue::OutputTileIterator iterator_D(
params.iterator_D,
params.ptr_D,
ConvOutputIteratorParameter::extent(params.problem_size),
thread_idx,
threadblock_offset
);
// Tile iterator reading from source accumulator tensor
typename Epilogue::OutputTileIterator iterator_C(
params.iterator_C,
params.ptr_C,
ConvOutputIteratorParameter::extent(params.problem_size),
thread_idx,
threadblock_offset
);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Compute threadblock-scoped matrix multiply-add
// Epilogue is fused in the mainloop
mma(params.gemm_k_iterations,
accumulators,
iterator_A,
params.iterator_A,
iterator_B,
params.iterator_B,
accumulators,
epilogue,
output_op,
iterator_D,
iterator_C,
params.split_k_slices);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/direct_convolution.h/0 | {
"file_path": "include/cutlass/conv/kernel/direct_convolution.h",
"repo_id": "include",
"token_count": 7146
} | 17 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile)
matrix from memory.
This iterator assumes TensorNDHWC layout of tensors in Global Memory.
The iterator is specialized for each of the three convolution operators: forward propagation (Fprop),
backward data gradient (Dgrad), and backward weight gradient (Wgrad).
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/conv/threadblock/conv3d_params.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename Element_,
typename ThreadMap_,
bool IsDeconv_ = false
>
class Conv3dFpropFilterTileAccessIteratorAnalytic {
public:
//
// Types
//
using Shape = Shape_;
using Element = Element_;
using Layout = layout::TensorNDHWC;
using ThreadMap = ThreadMap_;
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
using TensorRef = cutlass::TensorRef<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
static bool const IsDeconv = IsDeconv_;
static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
static int const kConvDim = 3;
using ConvProblemSize = typename conv::Conv3dProblemSize;
static int const kAccessesPerVector = 1;
//
// Simplifying assertions
//
static_assert(ThreadMap::Iterations::kContiguous == 1,
"Require Iterations::kContiguous == 1");
//
// Parameters structure
//
using Params = Conv3dAnalyticParams<Layout>;
private:
Params const ¶ms_;
ConvProblemSize const &problem_size_;
LongIndex iteration_contiguous_;
LongIndex iteration_strided_;
char const *pointer_;
int filter_t_;
int filter_r_;
int filter_s_;
int filter_c_;
int offset_k_[ThreadMap::Iterations::kStrided];
public:
CUTLASS_HOST_DEVICE
Conv3dFpropFilterTileAccessIteratorAnalytic(
Params const ¶ms,
ConvProblemSize const &problem_size,
Element const *ptr,
int thread_idx,
MatrixCoord const &threadblock_offset = MatrixCoord()
):
params_(params),
problem_size_(problem_size),
pointer_(reinterpret_cast<char const *>(ptr)),
filter_t_(0),
filter_r_(0),
filter_s_(0),
filter_c_(0) {
layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx);
filter_c_ = threadblock_offset.row() + thread_coord.contiguous();
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
offset_k_[s] = threadblock_offset.column() + thread_coord.strided() + s * ThreadMap::Delta::kStrided;
}
set_iteration_index(0);
}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(Index index) {
iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;  // convert Element offset to bytes (pointer_ is char const *)
}
CUTLASS_HOST_DEVICE
void advance() {
// moves to the next tile
++filter_s_;
if (filter_s_ < problem_size_.S) {
return;
}
filter_s_ = 0;
++filter_r_;
if (filter_r_ < problem_size_.R) {
return;
}
filter_r_ = 0;
++filter_t_;
if (filter_t_ < problem_size_.T) {
return;
}
filter_t_ = 0;
filter_c_ += Shape::kRow * problem_size_.split_k_slices;
}
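// Note (illustrative): successive calls to advance() above walk the filter in
// (s, r, t) order (S fastest, then R, then T) and only after a full T*R*S
// sweep do they advance filter_c_ by Shape::kRow * split_k_slices.
// For a 3x3x3 filter this means 27 calls per channel-block step.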
/// Returns the coordinate in the filter tensor W that is currently pointed to
/// by the iterator.
CUTLASS_HOST_DEVICE
TensorCoord at() const {
int k = offset_k_[iteration_strided_];
return TensorCoord(k, filter_t_, filter_r_, filter_s_, filter_c_);
}
/// Returns true if the current coordinate is within the filter tensor W
CUTLASS_HOST_DEVICE
bool valid() const {
TensorCoord coord = at();
auto input_channels = (IsDeconv ? problem_size_.K : problem_size_.C);
auto output_channels = (IsDeconv ? problem_size_.C : problem_size_.K);
return coord.n() < output_channels &&
coord.c() < input_channels;
}
/// Returns a pointer to the vector starting at the current coordinate
CUTLASS_HOST_DEVICE
AccessType const *get() const {
TensorCoord coord = at();
LongIndex offset = params_.layout(coord);
return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
}
/// Increments to the next memory access
CUTLASS_HOST_DEVICE
Conv3dFpropFilterTileAccessIteratorAnalytic &operator++() {
++iteration_contiguous_;
if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
return *this;
}
iteration_contiguous_ = 0;
++iteration_strided_;
if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
return *this;
}
iteration_strided_ = 0;
return *this;
}
/// Determines whether the Implicit GEMM can execute the given problem.
CUTLASS_HOST_DEVICE
static Status can_implement(ConvProblemSize const &problem_size) {
auto input_channels = (IsDeconv ? problem_size.K : problem_size.C);
auto output_channels = (IsDeconv ? problem_size.C : problem_size.K);
// check alignment constraint on iterator's contiguous dimension
if (input_channels % AccessType::kElements) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_analytic.h/0 | {
"file_path": "include/cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_analytic.h",
"repo_id": "include",
"token_count": 2665
} | 18 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a multistage threadblock-scoped Implicit GEMM Convolution kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/cache_operation.h"
#include "cutlass/gemm/threadblock/mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class ImplicitGemmMultistage :
public gemm::threadblock::MmaBase<Shape_, Policy_, Stages> {
public:
///< Base class
using Base = gemm::threadblock::MmaBase<Shape_, Policy_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Iterates over tiles of A operand in global memory
using IteratorA = IteratorA_;
///< Iterates over tiles of B operand in global memory
using IteratorB = IteratorB_;
///< Policy describing tuning details
using Policy = Policy_;
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
//
// Dependent types
//
/// Fragment of accumulator tile
using ElementC = typename Policy::Operator::ElementC;
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Internal structure exposed for introspection.
struct Detail {
/// Number of cp.async instructions to load one stage of operand A
static int const AsyncCopyIterationsPerStageA =
IteratorA::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const AsyncCopyIterationsPerStageB =
IteratorB::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
    /// Number of cp.async instructions to load one group of operand A
static int const kAccessesPerGroupA =
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
    /// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB =
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations;
// Optional staged-accumulation (e.g., tf32x3 kernels) for improved numerical
// accuracy, where each mainloop iteration first accumulates into a temporary
// set of freshly-cleared accumulators, which are subsequently added to the
// final accumulator set.
static bool const kStagedAccumulation = arch::detail::UseStagedAccumulation<Operator>::value;
};
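  // Worked example (illustrative, hypothetical values): if AsyncCopyIterationsPerStageA == 8
  // and Base::kWarpGemmIterations == 3, then kAccessesPerGroupA == (8 + 3 - 1) / 3 == 3,
  // i.e. the per-stage copies are spread over the warp-level MMA iterations by ceiling division.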
private:
using WarpLoadedFragmentA = typename Operator::FragmentA;
using WarpLoadedFragmentB = typename Operator::FragmentB;
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
ImplicitGemmMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx),
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
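    // Illustrative example (hypothetical WarpCount {kM = 2, kN = 2, kK = 2}): warp_idx == 5
    // gives warp_idx_mn == 1, warp_idx_k == 1, warp_idx_m == 1, warp_idx_n == 0.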
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
CUTLASS_DEVICE
void copy_tiles_and_advance(
IteratorA &iterator_A, IteratorB &iterator_B,
int group_start_A = 0, int group_start_B = 0) {
iterator_A.set_iteration_index(group_start_A *
IteratorA::kAccessesPerVector);
this->smem_iterator_A_.set_iteration_index(group_start_A);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
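          // For example (hypothetical values): 16-bit elements, 8 elements per access,
          // and 1 access per vector give kSrcBytes == 16 * 8 / 1 / 8 == 16 bytes,
          // i.e. one 128-bit cp.async per access.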
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
}
iterator_B.set_iteration_index(group_start_B *
IteratorB::kAccessesPerVector);
this->smem_iterator_B_.set_iteration_index(group_start_B);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B.get(), iterator_B.valid());
++iterator_B;
}
++this->smem_iterator_B_;
}
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations,
///< destination accumulator tile
FragmentC &accum,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
///< initial value of accumulator
FragmentC const &src_accum,
///< number of iterations per channel
int gemm_k_iterations_per_channel = 0,
///< Imaginary strides used for planar-complex only - ignored here
int64_t imag_stride_A = 0,
int64_t imag_stride_B = 0) {
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations) {
iterator_A.set_iteration_index(0);
this->smem_iterator_A_.set_iteration_index(0);
// Async Copy for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
typename IteratorA::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA::AccessType *>(
this->smem_iterator_A_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA::Element>::value *
IteratorA::ThreadMap::kElementsPerAccess /
IteratorA::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
dst_ptr + v, iterator_A.get(), iterator_A.valid());
++iterator_A;
}
++this->smem_iterator_A_;
}
iterator_B.set_iteration_index(0);
this->smem_iterator_B_.set_iteration_index(0);
// Async Copy for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
typename IteratorB::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB::AccessType *>(
this->smem_iterator_B_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB::Element>::value *
IteratorB::ThreadMap::kElementsPerAccess /
IteratorB::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
dst_ptr + v, iterator_B.get(), iterator_B.valid());
++iterator_B;
}
++this->smem_iterator_B_;
}
// Move to the next stage
iterator_A.advance();
iterator_B.advance();
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
accum = src_accum;
// Waits until kStages-2 stages have committed.
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA warp_loaded_frag_A[2];
WarpLoadedFragmentB warp_loaded_frag_B[2];
WarpTransformedFragmentA warp_transformed_frag_A[2];
WarpTransformedFragmentB warp_transformed_frag_B[2];
Operator warp_mma;
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
// Start issuing the first group of the next stage outside of the mainloop
copy_tiles_and_advance(iterator_A, iterator_B);
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0],
warp_loaded_frag_A[0], warp_loaded_frag_B[0]);
// tf32x3 kernels use staging accumulation. warp_mma uses a temporary
// accumulator and this temporary accumulator is added to the final
// accumulator once in every mainloop iteration.
plus<FragmentC> plus_accum;
FragmentC tmp_accum;
if (Detail::kStagedAccumulation) {
tmp_accum.clear();
}
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k > 0)
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
warp_loaded_frag_A[warp_mma_k % 2],
warp_loaded_frag_B[warp_mma_k % 2]);
// Issue global->shared copies for the next stage
int group_start_iteration_A, group_start_iteration_B;
if (warp_mma_k + 1 == Base::kWarpGemmIterations) {
group_start_iteration_A = 0;
group_start_iteration_B = 0;
} else {
group_start_iteration_A =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA;
group_start_iteration_B =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB;
}
copy_tiles_and_advance(iterator_A, iterator_B, group_start_iteration_A,
group_start_iteration_B);
if (Detail::kStagedAccumulation) {
warp_mma(
tmp_accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
tmp_accum
);
if (warp_mma_k == 0) {
accum = plus_accum(accum, tmp_accum);
tmp_accum.clear();
}
} else {
warp_mma(
accum,
warp_transformed_frag_A[warp_mma_k % 2],
warp_transformed_frag_B[warp_mma_k % 2],
accum
);
}
if (warp_mma_k + 1 == Base::kWarpGemmIterations)
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
// Inserts a fence to group cp.async instructions into stages.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages of cp.async have committed
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A.advance();
iterator_B.advance();
this->smem_iterator_A_.add_tile_offset({0, 1});
this->smem_iterator_B_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A_.add_tile_offset(
{0, -Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations;
}
}
}
if (Detail::kStagedAccumulation) {
accum = plus_accum(accum, tmp_accum);
}
// Insert fence and wait for all outstanding cp.async operations to commit.
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/implicit_gemm_multistage.h/0 | {
"file_path": "include/cutlass/conv/threadblock/implicit_gemm_multistage.h",
"repo_id": "include",
"token_count": 8208
} | 19 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cute/layout.hpp"
#include "cute/util/type_traits.hpp"
#include "cute/arch/copy_sm90_tma.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::detail {
////////////////////////////////////////////////////////////////////////////////////////////////////
// For each cutlass::layout, provides its corresponding cute stride types, 64b by default
template <class L>
struct TagToStrideA {
using type = L;
};
// Maps to modes [M, K, L]
template <>
struct TagToStrideA<layout::RowMajor> {
using type = cute::Stride<int64_t, cute::Int<1>, int64_t>;
using tag = layout::RowMajor;
};
// Maps to modes [M, K, L]
template <>
struct TagToStrideA<layout::ColumnMajor> {
using type = cute::Stride<cute::Int<1>, int64_t, int64_t>;
using tag = layout::ColumnMajor;
};
template <class L>
struct TagToStrideB {
using type = L;
};
// Maps to modes [N, K, L]
template <>
struct TagToStrideB<layout::RowMajor> {
using type = cute::Stride<cute::Int<1>, int64_t, int64_t>;
using tag = layout::RowMajor;
};
// Maps to modes [N, K, L]
template <>
struct TagToStrideB<layout::ColumnMajor> {
using type = cute::Stride<int64_t, cute::Int<1>, int64_t>;
using tag = layout::ColumnMajor;
};
// For each cutlass::layout *, provides its corresponding cute stride types, 64b by default
// Used by pointer array and grouped gemm
// Maps to modes [M, K, L]
template <>
struct TagToStrideA<layout::RowMajor *> {
using UnderlyingType = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>;
using type = UnderlyingType*;
using tag = layout::RowMajor;
};
// Maps to modes [M, K, L]
template <>
struct TagToStrideA<layout::ColumnMajor *> {
using UnderlyingType = cute::Stride<cute::Int<1>, int64_t, cute::Int<0>>;
using type = UnderlyingType*;
using tag = layout::ColumnMajor;
};
// Maps to modes [N, K, L]
template <>
struct TagToStrideB<layout::RowMajor *> {
using UnderlyingType = cute::Stride<cute::Int<1>, int64_t, cute::Int<0>>;
using type = UnderlyingType*;
using tag = layout::RowMajor;
};
// Maps to modes [N, K, L]
template <>
struct TagToStrideB<layout::ColumnMajor *> {
using UnderlyingType = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>;
using type = UnderlyingType*;
using tag = layout::ColumnMajor;
};
// Maps to modes [M, N, L]
template <class LayoutTag>
struct TagToStrideC : TagToStrideA<LayoutTag> { };
// Conv: Maps to modes ((P,N), C, _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorNWC> {
using type = cute::Stride<cute::Stride<int64_t, int64_t>, cute::Int<1>, cute::Int<0>>;
};
// Conv: Maps to modes ((P,Q,N), C, _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorNHWC> {
using type = cute::Stride<cute::Stride<int64_t, int64_t, int64_t>, cute::Int<1>, cute::Int<0>>;
};
// Conv: Maps to modes ((P,Q,Z,N), C, _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorNDHWC> {
using type = cute::Stride<cute::Stride<int64_t, int64_t, int64_t, int64_t>, cute::Int<1>, cute::Int<0>>;
};
// Conv: Maps to modes (K, (C,S), _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorKCS> {
using type = cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t>, cute::Int<0>>;
};
// Conv: Maps to modes (K, (C,S,R), _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorKCSR> {
using type = cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t, int64_t>, cute::Int<0>>;
};
// Conv: Maps to modes (K, (C,S,R,T), _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorKCSRT> {
using type = cute::Stride<int64_t, cute::Stride<cute::Int<1>, int64_t, int64_t, int64_t>, cute::Int<0>>;
};
// Conv: Maps to modes ((C,S), K, _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorCSK> {
using type = cute::Stride<cute::Stride<cute::Int<1>, int64_t>, int64_t, cute::Int<0>>;
};
// Conv: Maps to modes ((C,S,R), K, _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorCSRK> {
using type = cute::Stride<cute::Stride<cute::Int<1>, int64_t, int64_t>, int64_t, cute::Int<0>>;
};
// Conv: Maps to modes ((C,S,R,T), K, _0) for compatibility with GEMM epilogues expecting a batch mode stride
template <>
struct TagToStrideC<cutlass::layout::TensorCSRTK> {
using type = cute::Stride<cute::Stride<cute::Int<1>, int64_t, int64_t, int64_t>, int64_t, cute::Int<0>>;
};
// Convenience aliases
template<class LayoutTag>
using TagToStrideA_t = typename TagToStrideA<LayoutTag>::type;
template<class LayoutTag>
using TagToStrideB_t = typename TagToStrideB<LayoutTag>::type;
template<class LayoutTag>
using TagToStrideC_t = typename TagToStrideC<LayoutTag>::type;
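// Illustrative sanity checks of the mapping above (exposition only). They restate the
// specializations verbatim and assume cute re-exports is_same_v via cute/util/type_traits.hpp.
static_assert(cute::is_same_v<TagToStrideA_t<layout::RowMajor>,
                              cute::Stride<int64_t, cute::Int<1>, int64_t>>,
              "RowMajor A is K-major: the [M, K, L] stride is contiguous in K");
static_assert(cute::is_same_v<TagToStrideB_t<layout::RowMajor>,
                              cute::Stride<cute::Int<1>, int64_t, int64_t>>,
              "RowMajor B is N-major: the [N, K, L] stride is contiguous in N");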
////////////////////////////////////////////////////////////////////////////////////////////////////
// For 2.x compatibility APIs, provide stride->layout tag mappers
template<int ModeIndex, class Stride>
constexpr bool
is_major(Stride = {}) {
// Account for stride types with and without batch mode and batch modes with static zero stride
return cute::is_constant<1, decltype(cute::front(cute::get<ModeIndex>(cute::remove_pointer_t<Stride>{})))>::value;
}
// Note: this method can be used to deduce the layout tag of the A, C, and D matrices
template<class StrideA>
constexpr
auto
stride_to_layout_tag_A() {
if constexpr (is_major<0, StrideA>()) { // M major
return layout::ColumnMajor{};
}
else { // K major
return layout::RowMajor{};
}
CUTE_GCC_UNREACHABLE;
}
template<class StrideB>
constexpr
auto
stride_to_layout_tag_B() {
if constexpr (is_major<0, StrideB>()) { // N major
return layout::RowMajor{};
}
else { // K major
return layout::ColumnMajor{};
}
CUTE_GCC_UNREACHABLE;
}
template<class StrideC>
constexpr
auto
stride_to_layout_tag_C() {
if constexpr (is_major<0, StrideC>()) { // M major
return layout::ColumnMajor{};
}
else { // N major
return layout::RowMajor{};
}
CUTE_GCC_UNREACHABLE;
}
// Utilities to map Stride back on to their corresponding layout tags
template <class S>
struct StrideToLayoutTagA {
using type = decltype(detail::stride_to_layout_tag_A<S>());
};
template <class S>
struct StrideToLayoutTagB {
using type = decltype(detail::stride_to_layout_tag_B<S>());
};
template <class S>
struct StrideToLayoutTagC {
using type = decltype(detail::stride_to_layout_tag_C<S>());
};
// Convenience aliases
template<class S>
using StrideToLayoutTagA_t = typename StrideToLayoutTagA<S>::type;
template<class S>
using StrideToLayoutTagB_t = typename StrideToLayoutTagB<S>::type;
template<class S>
using StrideToLayoutTagC_t = typename StrideToLayoutTagC<S>::type;
////////////////////////////////////////////////////////////////////////////////////////////////////
// Inspects a tiled copy and whether its copy engine is TMA or not
template<class GmemTiledCopy>
constexpr bool is_tma_copy_engine() {
if constexpr (cute::is_void_v<GmemTiledCopy>) {
return false;
}
else {
if constexpr ( cute::is_base_of_v<cute::SM90_TMA_LOAD, GmemTiledCopy>
|| cute::is_base_of_v<cute::SM90_TMA_LOAD_MULTICAST, GmemTiledCopy>
|| cute::is_base_of_v<cute::SM90_TMA_LOAD_IM2COL, GmemTiledCopy>
|| cute::is_base_of_v<cute::SM90_TMA_LOAD_IM2COL_MULTICAST, GmemTiledCopy>
|| cute::is_base_of_v<cute::SM90_TMA_STORE, GmemTiledCopy>
|| cute::is_base_of_v<cute::SM90_TMA_STORE_IM2COL, GmemTiledCopy>
) {
return true;
}
}
return false;
}
template <class X, class = void>
struct RawDtype { using type = X; };
template <class X>
struct RawDtype<X,cute::void_t<typename X::raw_type>> { using type = typename X::raw_type; };
// Inspects a TiledCopy and returns its alignment in terms of element count
template <class GmemTiledCopy, class Element, class ElementMma = Element>
constexpr int
get_alignment_count_from_gmem_tiled_copy() {
if constexpr (cute::is_void_v<GmemTiledCopy>) {
return 1;
}
// Account for ElementC = void kernels
else if constexpr (cute::is_void_v<Element>) {
return 0;
}
else {
// For TMA tiled copies, we know the alignment has to be 128 bits
if constexpr (is_tma_copy_engine<GmemTiledCopy>()) {
return 128 / sizeof_bits<Element>::value;
}
else {
// For non-TMA tiled copies, TiledCopy holds the alignment count directly in its TiledShape_MN
return GmemTiledCopy::NumValSrc;
}
}
}
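// For example (illustrative): a TMA tiled copy of half_t elements yields
// 128 / sizeof_bits<half_t>::value == 128 / 16 == 8 elements of alignment,
// while a void GmemTiledCopy yields 1 and a void Element yields 0.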
// Return alignment bit requirements for the GEMM inputs.
template <
class ElementType
>
constexpr int
get_input_alignment_bits() {
return 128;
}
// Return alignment bit requirements for the GEMM outputs.
template <class ElementType>
constexpr int
get_output_alignment_bits() {
return 128;
}
// Return the shape that is associated with stride-1 mode, or 1 if not found
template<typename Shape, typename Stride>
CUTLASS_HOST_DEVICE constexpr
auto
get_contiguous_shape(Shape const & shape, Stride const & stride) {
using namespace cute;
auto idx = find_if(append(flatten(stride), _1{}), [](auto s){ return is_constant<1,decltype(s)>{}; });
return get<decltype(idx)::value>(append(flatten(shape), _1{}));
}
// Check if tensor shape satisfies a given major alignment
template<int Alignment, class Shape, class Stride>
CUTLASS_HOST_DEVICE constexpr
bool
check_alignment(Shape const & shape, Stride const & stride) {
return is_major<0>(stride)
? get_contiguous_shape(cute::get<0>(shape), cute::get<0>(stride)) % Alignment == 0
: get_contiguous_shape(cute::get<1>(shape), cute::get<1>(stride)) % Alignment == 0;
}
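// Illustrative usage (hypothetical shapes and strides, using CuTe helpers already
// pulled in via cute/layout.hpp):
//   auto shape  = cute::make_shape(128, 64, 1);                                  // (M, K, L)
//   auto stride = cute::make_stride(int64_t(64), cute::_1{}, int64_t(8192));     // K-major
//   check_alignment<8>(shape, stride);  // contiguous K extent is 64, 64 % 8 == 0 -> true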
// Compute the alignment implied by a swizzle functor: 2^(B + M + |S|)
template<int B, int M, int S>
CUTLASS_HOST_DEVICE constexpr
size_t
alignment_for_swizzle(cute::Swizzle<B, M, S>) {
static_assert(B >= 0 and M >= 0);
return size_t(1) << size_t(B + M + cute::abs(S));
}
template<class Layout>
CUTLASS_HOST_DEVICE constexpr
size_t
alignment_for_swizzle(Layout layout) {
return alignment_for_swizzle(cute::detail::get_swizzle_portion(layout));
}
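// For example (illustrative): cute::Swizzle<3, 4, 3> yields size_t(1) << (3 + 4 + 3) == 1024.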
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::detail
| include/cutlass/detail/layout.hpp/0 | {
"file_path": "include/cutlass/detail/layout.hpp",
"repo_id": "include",
"token_count": 4492
} | 20 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/detail/dependent_false.hpp"
#include "cutlass/epilogue/fusion/operations.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::epilogue::fusion {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Dispatch interface for epilogue fusion callbacks
// For visitor fusions, this is just a convenience wrapper to provide metadata and non-nested args.
// It is also valid to just pass visitor callbacks directly to the collective, e.g. fusion::Sm90LinearCombination,
// provided the collective supports a visitor callbacks interface. This is useful for implementing custom fusions.
template <
class DispatchPolicy, // specialize on collective's dispatch policy since callbacks API will depend on collective's algorithm
class Operation, // the fusion operation being performed, e.g. fusion::LinearCombination
class CtaTile_MNK, // computed tile per CTA
class EpilogueTile_MN, // epilogue subtile size
class... Args // callbacks implementation dependent args (e.g. copy atoms, smem layouts)
>
struct FusionCallbacks {
static_assert(cutlass::detail::dependent_false<DispatchPolicy, Operation>, "Could not find a callbacks specialization.");
};
// Metadata helper to handle custom EVTs or other non-FusionCallbacks types
template <class T>
struct FusionCallbacksTraits {
using DispatchPolicy = void;
using Operation = T;
using CtaTile_MNK = void;
using EpilogueTile_MN = void;
using ElementCompute = void;
};
template <
class DispatchPolicy_,
class Operation_,
class CtaTile_MNK_,
class EpilogueTile_MN_,
class... Args
>
struct FusionCallbacksTraits<
FusionCallbacks<DispatchPolicy_, Operation_, CtaTile_MNK_, EpilogueTile_MN_, Args...>
> {
using DispatchPolicy = DispatchPolicy_;
using Operation = Operation_;
using CtaTile_MNK = CtaTile_MNK_;
using EpilogueTile_MN = EpilogueTile_MN_;
using ElementCompute = typename Operation::ElementCompute;
};
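// Usage sketch (illustrative): for an instantiated FusionCallbacks type `Callbacks`,
// FusionCallbacksTraits<Callbacks>::Operation recovers the fusion operation and
// FusionCallbacksTraits<Callbacks>::ElementCompute its compute type; any other type T
// (e.g. a custom EVT) falls back to the primary template, which reports
// DispatchPolicy = void and Operation = T.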
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::epilogue::fusion
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/fusion/callbacks.hpp/0 | {
"file_path": "include/cutlass/epilogue/fusion/callbacks.hpp",
"repo_id": "include",
"token_count": 1049
} | 21 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped complex GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_gaussian_complex_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for the complex*complex case
// 4 real-valued mma operations (Complex)
// A = (ar + j ai), B = (br + j bi), D = AB
// D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br)
/////////////////////////////////////////////////////////////////////////////////////////////////
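// Exposition-only reference (a hypothetical helper; nothing in this header uses it):
// spells out the four real-valued multiply-adds enumerated above for a single
// complex accumulation d += a * b, with d = dr + j di.
CUTLASS_HOST_DEVICE
void reference_complex_mma_example(
    float ar, float ai,    // a = ar + j ai
    float br, float bi,    // b = br + j bi
    float &dr, float &di   // d = dr + j di (accumulated in place)
) {
  dr += ar * br;   // real-valued mma 1
  dr -= ai * bi;   // real-valued mma 2
  di += ar * bi;   // real-valued mma 3
  di += ai * br;   // real-valued mma 4
}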
template <
/// Epilogue Shape
typename Shape_,
/// Warp-level mma operator
typename WarpMmaTensorOp_,
/// Number of k partitions
int PartitionsK,
/// Epilogue output operator
typename OutputOp_,
/// Elements accessed by inner-most loop of AccumulatorFragmentIterator::load()
int ElementsPerAccess,
/// Multiply-add operator
/// Selects between (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex)
typename Operator_ = arch::OpMultiplyAddComplex
>
struct DefaultEpilogueComplexTensorOp {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using Operator = Operator_;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap,
ElementOutput
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
ElementAccumulator,
LayoutC
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator
>;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 0>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization that defines sensible defaults for epilogues for the complex*complex case
// 3 real-valued mma operations (Gaussian Complex)
// A = (ar + j ai), B = (br + j bi), D = AB
// P1 = (ar + ai) * br, P2 = - ar * (br - bi), P3 = ai * (br + bi)
// D = dr + j di = (P1 - P3) + j (P1 + P2)
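// Expanding the products verifies the identity: P1 - P3 = ar*br - ai*bi = dr and
// P1 + P2 = ar*bi + ai*br = di, so only 3 real-valued multiplies are needed instead of 4.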
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueComplexTensorOp <Shape_, WarpMmaTensorOp_, PartitionsK,
OutputOp_, ElementsPerAccess,
arch::OpMultiplyAddGaussianComplex
> {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using Operator = arch::OpMultiplyAddGaussianComplex;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap,
ElementOutput
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorGaussianComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
ElementAccumulator,
LayoutC
>;
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator
>;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 0>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/default_epilogue_complex_tensor_op.h",
"repo_id": "include",
"token_count": 2868
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <typename Element_>
class DirectStoreEpilogueIterator {
public:
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = 1;
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
using Base = PredicatedTileIteratorParams;
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout) {
stride = layout.stride(0) * sizeof(Element);
}
CUTLASS_HOST_DEVICE
Params(Base const &base) :
Base(base) { }
};
public:
//
// Data members
//
Element *pointer; // pointer to the output matrix
LongIndex stride; // stride in elements between rows
TensorCoord extent; // extent of output matrix
int thread_idx; // thread index
TensorCoord threadblock_offset;
public:
/// Constructor
CUTLASS_DEVICE
DirectStoreEpilogueIterator(
PredicatedTileIteratorParams const & params,
Element *pointer_,
TensorCoord extent_,
int thread_idx_,
TensorCoord threadblock_offset_ = TensorCoord(),
int const * indices = nullptr
):
pointer(pointer_),
stride(params.stride / sizeof(Element)),
extent(extent_),
thread_idx(thread_idx_),
threadblock_offset(threadblock_offset_)
{
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/direct_store_epilogue_iterator.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/direct_store_epilogue_iterator.h",
"repo_id": "include",
"token_count": 1332
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs.
This does not attempt to target any particular output layout. Instead, each threadblock
streams out its accumulator elements using 128b store operations. This assumes all threadblocks
have unique output tiles.
The target data layout is:
- threadblock indices mapped to linear offsets as (m, n, k), where m is fastest-changing
- threadblock output space partitioned into warps; each warp's region is contiguous
- per-thread accumulators partitioned into 128b accesses
- output memory striped across the threads of a warp
This enables very fast streaming of data, completely limited by the memory system. No predication
or data exchange is performed, and each threadblock is assumed to have a full region of memory
to write to.
This epilogue establishes an upper bound for epilogue performance and is suitable for
reductions across the GEMM K dimension which require a separate workspace.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_, ///< shape of accumulator tile (concept: MatrixShape)
int WarpCount, ///< number of warps
  typename FragmentC_ ///< accumulator fragment produced by the warp-level GEMM operator (concept: Array)
>
class EpilogueWorkspace {
public:
using Shape = Shape_;
using FragmentC = FragmentC_;
using ElementC = typename FragmentC::value_type;
static int const kWarpCount = WarpCount;
/// Optimize for 128b accesses
static int const kAccessSizeInBits = 128;
/// Warp size from the perspective of memory operations
static int const kWarpSize = 32;
/// Vector length of accesses
static int const kElementsPerAccess =
kAccessSizeInBits / sizeof_bits<ElementC>::value;
/// Number of stores per thread
static int const kIterations = FragmentC::kElements / kElementsPerAccess;
static_assert(
!(FragmentC::kElements % kElementsPerAccess),
"The number of accumulators must be divisible by the access size.");
/// Total number of vectorized accesses in warp (in units of vector)
static int const kWarpAccesses = kIterations * kWarpSize;
/// Total number of vectorized accesses in threadblock tile (in units of vector)
static int const kThreadblockAccesses = kWarpAccesses * kWarpCount;
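  // Worked example (illustrative, hypothetical configuration): with float accumulators,
  // kElementsPerAccess == 128 / 32 == 4; a 64-element FragmentC then gives kIterations == 16
  // and kWarpAccesses == 16 * 32 == 512 vector accesses per warp.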
/// Parameters structure
struct Params {
/// Pointer to C matrix
ElementC *ptr_C;
/// Stride between tiles along the GEMM N dimension (in units of vectors)
int stride_n;
/// Stride between tiles along the GEMM K dimension (in units of vectors)
int stride_k;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(
ElementC *ptr_C, ///< Pointer to C matrix
int stride_n_, ///< Stride between tiles along the GEMM N dimension (in units of ElementC)
int stride_k_ ///< Stride between tiles along the GEMM K dimension (in units of ElementC)
):
ptr_C(ptr_C), stride_n(stride_n_ / kElementsPerAccess), stride_k(stride_k_ / kElementsPerAccess) {
}
};
/// Shared storage allocation needed by the epilogue
struct SharedStorage {
// Intentionally empty
};
private:
struct alignas((kAccessSizeInBits / 8)) AccessType {
Array<ElementC, kElementsPerAccess> storage;
};
/// Constant reference to parameters object
AccessType *pointer_;
/// Stride between tiles along the n dimension (in vectors)
int stride_n_;
/// Stride between tiles along the k dimension (in vectors)
int stride_k_;
public:
/// Constructor
CUTLASS_DEVICE
EpilogueWorkspace(
Params const ¶ms, ///< Host-constructable params object
SharedStorage &, ///< Shared storage object
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
pointer_(reinterpret_cast<AccessType *>(params.ptr_C)),
stride_n_(params.stride_n),
stride_k_(params.stride_k) {
// Add per-thread offset
pointer_ += lane_idx + warp_idx * kWarpAccesses;
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
cutlass::gemm::GemmCoord problem_size, ///< Problem size of GEMM (units of ElementC)
cutlass::gemm::GemmCoord tb_tile_coord, ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles)
FragmentC const &accum) { ///< Accumulator tile
// Compute offset for entire threadblock (note, per-thread offset has been folded in already)
AccessType *pointer = pointer_ +
tb_tile_coord.m() * kThreadblockAccesses +
tb_tile_coord.n() * stride_n_ +
tb_tile_coord.k() * stride_k_;
// Cast to vectorized view of accumulator fragments
AccessType const * src_pointer = reinterpret_cast<AccessType const *>(&accum);
// Write out accumulators at full speed
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kIterations; ++i) {
pointer[i * kWarpSize] = src_pointer[i];
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_workspace.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_workspace.h",
"repo_id": "include",
"token_count": 2162
} | 24 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief PredicatedTileIteratorPredicates.
PredicatedTileIteratorPredicates enables both upper and lower bounds for predicates.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator predicates used to bound computations in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <
  typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_ ///< Element data type
>
class PredicatedTileIteratorPredicates {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert( ThreadMap::Iterations::kRow > 0,"ThreadMap::Iterations::kRow must be > 0");
static_assert( ThreadMap::Iterations::kGroup > 0,"ThreadMap::Iterations::kGroup must be > 0");
static_assert( ThreadMap::Iterations::kCluster > 0,"ThreadMap::Iterations::kCluster must be > 0");
static_assert( ThreadMap::Iterations::kColumn > 0,"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn *
ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup *
ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Layout const &layout):
PredicatedTileIteratorParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()
)
{
}
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
    ///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
PredicatedTileIteratorParams params_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Lower and upper extents of the matrix tile in rows
Index lower_extent_row_;
Index upper_extent_row_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
/// Internal state counter
int state_[3];
//
// Static asserts about internal strides
//
static_assert(sizeof(lower_extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(upper_extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorPredicates(
PredicatedTileIteratorParams const & params,
TensorCoord lower_extent,
TensorCoord upper_extent,
int thread_idx,
TensorCoord threadblock_offset = TensorCoord()
):
params_(params)
{
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
lower_extent_row_ = lower_extent.row();
upper_extent_row_ = upper_extent.row();
thread_start_row_ = thread_offset.row();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] = ((thread_offset.column()
+ ThreadMap::Delta::kColumn * c) < upper_extent.column()) &&
((thread_offset.column() + ThreadMap::Delta::kColumn * c) >= lower_extent.column());
}
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorPredicates &operator++() {
++state_[0];
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
}
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask &mask) {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const &mask) {
mask_ = mask;
}
///< Gets lower_extent_row_
CUTLASS_DEVICE Index get_lower_extent_row() {
return lower_extent_row_;
}
///< Gets upper_extent_row_
CUTLASS_DEVICE Index get_upper_extent_row() {
return upper_extent_row_;
}
///< Gets thread_start_row_
CUTLASS_DEVICE Index get_thread_start_row() {
return thread_start_row_;
}
};
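//
// Illustrative usage (a minimal sketch only; the thread map alias, extents, and variable
// names below are hypothetical and are not defined in this header):
//
//   using Predicates = PredicatedTileIteratorPredicates<OutputTileThreadMap, cutlass::half_t>;
//
//   Predicates::Params params(layout);      // layout::RowMajor describing the output tile
//   Predicates predicates(params, lower_extent, upper_extent, thread_idx);
//
//   // The per-column mask and row bounds can then guard epilogue accesses:
//   Predicates::Mask mask;
//   predicates.get_mask(mask);
//   bool row_guard = (predicates.get_thread_start_row() >= predicates.get_lower_extent_row()) &&
//                    (predicates.get_thread_start_row() <  predicates.get_upper_extent_row());
//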
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/predicated_tile_iterator_predicates.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/predicated_tile_iterator_predicates.h",
"repo_id": "include",
"token_count": 3058
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Warp-level tile iterators for storing Volta Tensor Op accumulator tiles to shared memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
#include "cutlass/epilogue/warp/volta_tensor_op_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename InterleavedTileShape, ///< shape of indivisible instruction-level arrangement (concept: GemmShape)
typename ElementC, ///< Accumulator element type
typename Layout ///< target shared memory layout
>
struct TileIteratorVoltaTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape)
>
struct TileIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, half_t, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using InterleavedTileShape = gemm::GemmShape<32, 32, 4>;
using Element = half_t;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, Element, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// Array type for aligned memory accesses
using AccessType = typename Policy::AccessType;
/// This is the fragment size produced by one access of the iterator.
using Fragment = typename Policy::Fragment;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = typename Policy::AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Number of elements per access
static int const kElementsPerAccess = Policy::kElementsPerAccess;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
static int const kRowsPerQuad = 4;
static int const kColumnsPerQuad = 8;
static int const kAccessesPerQuad = kColumnsPerQuad / Policy::kElementsPerAccess;
static int const kAccessQuadDelta = 16;
};
/// Padding quantity
using Padding = MatrixShape<
0,
Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
TileIteratorVoltaTensorOp(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / Policy::kElementsPerAccess) {
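// Map this lane to its starting (row, column) within the interleaved tile: lanes are grouped
// into quads of four threads, quads cover blocks of kRowsPerQuad rows by kColumnsPerQuad
// columns, and each lane within a quad starts on a distinct row of its quad's block.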
int quad_id = lane_id / Detail::kLanesInQuad;
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
int quad_row_idx = ((quad_id & 4) >> 1) + (quad_id & 1);
int quad_col_idx = ((quad_id & 2) >> 1);
int row = quad_row_idx * Detail::kRowsPerQuad + lane_in_quad;
int column = quad_col_idx * Detail::kColumnsPerQuad;
pointer_ += layout_({row, column / kElementsPerAccess});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / Policy::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & add_tile_offset(TensorCoord const &tile_offset) {
pointer_ += layout_({
tile_offset.row() * Shape::kRow,
tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int tile_idx = 0; tile_idx < Policy::TileIterations::kColumn; ++tile_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < Policy::kAccessesPerInterleavedTile; ++access_idx) {
int access_quad = access_idx / 2;
int access = access_idx % 2;
int ptr_offset = tile_idx * InterleavedTileShape::kN / Policy::kElementsPerAccess +
access_quad * Detail::kAccessQuadDelta / Policy::kElementsPerAccess +
access + pointer_offset / Policy::kElementsPerAccess;
int frag_idx = tile_idx * Policy::kAccessesPerInterleavedTile + access_idx;
AccessType access_vector = frag_ptr[frag_idx];
pointer_[ptr_offset] = access_vector;
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int tile_idx = 0; tile_idx < Policy::TileIterations::kColumn; ++tile_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < Policy::kAccessesPerInterleavedTile; ++access_idx) {
int access_quad = access_idx / 2;
int access = access_idx % 2;
int ptr_offset = tile_idx * InterleavedTileShape::kN / Policy::kElementsPerAccess +
access_quad * Detail::kAccessQuadDelta / Policy::kElementsPerAccess +
access + pointer_offset / Policy::kElementsPerAccess;
int frag_idx = tile_idx * Policy::kAccessesPerInterleavedTile + access_idx;
frag_ptr[frag_idx] = pointer_[ptr_offset];
}
}
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
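//
// Illustrative usage (a sketch only; the warp shape, shared memory pointer, stride, and
// fragment names below are hypothetical):
//
//   using TileIterator = TileIteratorVoltaTensorOp<
//       cutlass::gemm::GemmShape<64, 64, 4>,      // warp-level GEMM shape
//       cutlass::gemm::GemmShape<32, 32, 4>,      // interleaved instruction-level arrangement
//       cutlass::half_t, cutlass::layout::RowMajor>;
//
//   TileIterator iter(TileIterator::TensorRef(smem_ptr, TileIterator::Layout(ldm)), lane_id);
//   iter.store(accumulator_fragment);   // write one fragment of accumulators to shared memory
//   iter.add_tile_offset({1, 0});       // advance one whole tile along the rows
//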
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template for reading and writing tiles of accumulators to shared memory
template <
typename WarpShape_ ///< shape of warp-level GEMM (concept: MatrixShape)
>
struct TileIteratorVoltaTensorOp<WarpShape_, gemm::GemmShape<32, 32, 4>, float, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using InterleavedTileShape = gemm::GemmShape<32, 32, 4>;
using Element = float;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>; ///< Tensor Reference object
using TensorCoord = MatrixCoord; ///< Logical coordinate in referenced tensor
using Index = typename TensorRef::Index;
using LongIndex = typename TensorRef::LongIndex;
using Policy = VoltaTensorOpPolicy<WarpShape, InterleavedTileShape, Element, Layout>;
/// Shape of the tile in memory
using Shape = MatrixShape<
Policy::kRowsPerIteration,
WarpShape::kN
>;
/// Array type for aligned memory accesses
using AccessType = typename Policy::AccessType;
/// This is the fragment size produced by one access of the iterator.
using Fragment = typename Policy::Fragment;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = typename Policy::AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
/// Number of elements per access
static int const kElementsPerAccess = Policy::kElementsPerAccess;
// Internal constants
struct Detail {
static int const kLanesInQuad = 4;
static int const kRowsPerQuad = 4;
static int const kColumnsPerQuad = 8;
static int const kAccessesPerQuad = kColumnsPerQuad / Policy::kElementsPerAccess;
static int const kAccessQuadDelta = 16;
};
/// Padding quantity
using Padding = MatrixShape<
0,
Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Internal pointer to memory
AccessType *pointer_;
/// Internal layout object
Layout layout_;
public:
/// Default constructor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp(): pointer_(nullptr) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
TileIteratorVoltaTensorOp(
TensorRef const &ref,
unsigned lane_id
):
pointer_(reinterpret_cast<AccessType *>(ref.data())),
layout_(ref.stride()[0] / Policy::kElementsPerAccess) {
int quad_id = lane_id / Detail::kLanesInQuad;
int lane_in_quad = (lane_id % Detail::kLanesInQuad);
int const kQuadRowDelta = 4;
int const kQuadColumnDelta = 2 * Policy::MmaIterations::kColumn;
int quad_row_offset = ((quad_id & 4) / 2 + (quad_id & 1)) * kQuadRowDelta;
int quad_column_offset = (quad_id & 2) / 2 * kQuadColumnDelta;
int thread_row_offset = (lane_in_quad & 1);
int thread_column_offset = (lane_in_quad & 2) / 2;
int row = quad_row_offset + thread_row_offset;
int column = quad_column_offset + thread_column_offset;
pointer_ += layout_({row, column});
}
/// Adds a pointer offset
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & add_pointer_offset(Index pointer_offset) {
pointer_ += pointer_offset / Policy::kElementsPerAccess;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & add_tile_offset(TensorCoord const &tile_offset) {
pointer_ += layout_({
tile_offset.row() * Shape::kRow,
tile_offset.column() * Shape::kColumn / Policy::kElementsPerAccess});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_HOST_DEVICE
TileIteratorVoltaTensorOp & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Store
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
int const kAccessesPerRow = Policy::TileIterations::kColumn * Policy::MmaIterations::kColumn * 2;
CUTLASS_PRAGMA_UNROLL
for (int row_idx = 0; row_idx < Policy::kRowsPerMmaTile; ++row_idx) {
CUTLASS_PRAGMA_UNROLL
for (int access_idx = 0; access_idx < kAccessesPerRow; ++access_idx) {
int frag_idx = row_idx * kAccessesPerRow + access_idx;
int ptr_column_offset = (access_idx & 1) * 2 +
(access_idx & 2) * Policy::MmaIterations::kColumn * 2 +
(access_idx & 4) * Policy::MmaIterations::kColumn * 2;
int ptr_row_offset = row_idx * 2;
int ptr_offset = layout_({ptr_row_offset, ptr_column_offset}) + pointer_offset / Policy::kElementsPerAccess;
pointer_[ptr_offset] = frag_ptr[frag_idx];
}
}
}
/// Store
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
/// Load
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
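// Loading accumulators back from this shared-memory arrangement is not implemented;
// this specialization is store-only.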
assert(0);
}
/// Load
CUTLASS_HOST_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Set smem base address
CUTLASS_HOST_DEVICE
void set_smem_base_address(Index address) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h/0 | {
"file_path": "include/cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h",
"repo_id": "include",
"token_count": 4836
} | 26 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cute/algorithm/functional.hpp"
#include "cute/atom/mma_atom.hpp"
#include "cute/algorithm/gemm.hpp"
#include "cute/tensor_predicate.hpp"
#include "cute/numeric/arithmetic_tuple.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::collective {
using namespace cute;
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int Stages,
class TileShape_,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_>
struct CollectiveMma<
MainloopSm80CpAsyncUnpredicated<Stages>,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_
>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm80CpAsyncUnpredicated<Stages>;
using TileShape = TileShape_;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
// Following the change in TestSmall: TileShape switched to CtaShape
// For the SM80 architecture, CtaShape should equal TileShape
using CtaShape_MNK = TileShape;
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
static_assert(DispatchPolicy::Stages >= 2, "CpAsync mainloop must have at least 2 stages in the pipeline.");
struct SharedStorage
{
cute::array_aligned<ElementA, cute::cosize_v<SmemLayoutA>> smem_a;
cute::array_aligned<ElementB, cute::cosize_v<SmemLayoutB>> smem_b;
};
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A;
StrideA dA;
ElementB const* ptr_B;
StrideB dB;
};
// Device side kernel params
using Params = Arguments;
//
// Methods
//
CollectiveMma() = default;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& _, Arguments const& args, void* workspace) {
(void) workspace;
return args;
}
/// Perform a collective-scoped matrix multiply-accumulate
template <
class FrgTensorD,
class TensorA,
class TensorB,
class FrgTensorC,
class KTileIterator,
class ResidueMNK
>
CUTLASS_DEVICE void
operator() (
FrgTensorD &accum,
TensorA gA,
TensorB gB,
FrgTensorC const &src_accum,
KTileIterator k_tile_iter, int k_tile_count,
ResidueMNK residue_mnk,
int thread_idx,
char *smem_buf)
{
using namespace cute;
static_assert(is_rmem<FrgTensorD>::value, "D tensor must be rmem resident.");
static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident.");
static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident.");
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 3,
"MainloopSm80CpAsync must have a pipeline mode in the smem layout.");
static_assert(cute::rank(SmemLayoutB{}) == 3,
"MainloopSm80CpAsync must have a pipeline mode in the smem layout.");
// Construct shared memory tiles
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf);
Tensor sA = make_tensor(make_smem_ptr(storage.smem_a.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(storage.smem_b.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
CUTE_STATIC_ASSERT_V(size<0>(gA) == size<0>(sA)); // BLK_M
CUTE_STATIC_ASSERT_V(size<1>(gA) == size<1>(sA)); // BLK_K
CUTE_STATIC_ASSERT_V(size<0>(gB) == size<0>(sB)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(gB) == size<1>(sB)); // BLK_K
CUTE_STATIC_ASSERT_V(size<1>(sA) == size<1>(sB)); // BLK_K
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE
// Partition the copying of A and B tiles across the threads
GmemTiledCopyA gmem_tiled_copy_A;
GmemTiledCopyB gmem_tiled_copy_B;
auto gmem_thr_copy_A = gmem_tiled_copy_A.get_slice(thread_idx);
auto gmem_thr_copy_B = gmem_tiled_copy_B.get_slice(thread_idx);
Tensor tAgA = gmem_thr_copy_A.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k)
Tensor tAsA = gmem_thr_copy_A.partition_D(sA); // (ACPY,ACPY_M,ACPY_K,PIPE)
Tensor tBgB = gmem_thr_copy_B.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k)
Tensor tBsB = gmem_thr_copy_B.partition_D(sB); // (BCPY,BCPY_N,BCPY_K,PIPE)
//
// PREDICATES
//
(void) residue_mnk;
//assert(residue_mnk == make_tuple(0,0,0));
//
// PREFETCH
//
// Start async loads for all pipes but the last
CUTLASS_PRAGMA_UNROLL
for (int k_pipe = 0; k_pipe < DispatchPolicy::Stages-1; ++k_pipe) {
copy(gmem_tiled_copy_A, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,k_pipe));
copy(gmem_tiled_copy_B, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,k_pipe));
cp_async_fence();
--k_tile_count;
if (k_tile_count > 0) { ++k_tile_iter; }
}
//
// MMA Atom partitioning
//
// Tile MMA compute thread partitions and allocate accumulators
TiledMma tiled_mma;
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCrA = thr_mma.partition_fragment_A(sA(_,_,0)); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.partition_fragment_B(sB(_,_,0)); // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(src_accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(src_accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K
CUTE_STATIC_ASSERT_V(size(gmem_tiled_copy_A) == size(tiled_mma));
CUTE_STATIC_ASSERT_V(size(gmem_tiled_copy_B) == size(tiled_mma));
//
// Copy Atom retiling
//
auto smem_tiled_copy_A = make_tiled_copy_A(SmemCopyAtomA{}, tiled_mma);
auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(thread_idx);
Tensor tCsA = smem_thr_copy_A.partition_S(sA); // (CPY,CPY_M,CPY_K,PIPE)
Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); // (CPY,CPY_M,CPY_K)
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K
auto smem_tiled_copy_B = make_tiled_copy_B(SmemCopyAtomB{}, tiled_mma);
auto smem_thr_copy_B = smem_tiled_copy_B.get_thread_slice(thread_idx);
Tensor tCsB = smem_thr_copy_B.partition_S(sB); // (CPY,CPY_N,CPY_K,PIPE)
Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB); // (CPY,CPY_N,CPY_K)
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // CPY_N
CUTE_STATIC_ASSERT_V(size<2>(tCsB) == size<2>(tCrB_copy_view)); // CPY_K
//
// PIPELINED MAIN LOOP
//
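// The loop below keeps DispatchPolicy::Stages k-tiles in flight in shared memory (filled by
// cp.async) plus a K_BLOCK_MAX-deep register pipeline feeding the MMA:
//  * the smem->rmem copy for k_block+1 is issued while the MMA for k_block executes;
//  * the gmem->smem copy for the next k-tile is issued once per k-tile, at k_block == 0;
//  * cp_async_wait<Stages-2>() followed by __syncthreads() ensures the smem stage about to
//    be read has landed before it is consumed.
//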
// Current pipe index in smem to read from
int smem_pipe_read = 0;
// Current pipe index in smem to write to
int smem_pipe_write = DispatchPolicy::Stages-1;
Tensor tCsA_p = tCsA(_,_,_,smem_pipe_read);
Tensor tCsB_p = tCsB(_,_,_,smem_pipe_read);
// Size of the register pipeline
auto K_BLOCK_MAX = size<2>(tCrA);
// PREFETCH register pipeline
if (K_BLOCK_MAX > 1) {
// Wait until our first prefetched tile is loaded in
cp_async_wait<DispatchPolicy::Stages-2>();
__syncthreads();
// Prefetch the first rmem from the first k-tile
copy(smem_tiled_copy_A, tCsA_p(_,_,Int<0>{}), tCrA_copy_view(_,_,Int<0>{}));
copy(smem_tiled_copy_B, tCsB_p(_,_,Int<0>{}), tCrB_copy_view(_,_,Int<0>{}));
}
CUTLASS_PRAGMA_NO_UNROLL
while (k_tile_count > -(DispatchPolicy::Stages-1))
{
// Pipeline the outer products with a static for loop.
//
// Note, the for_each() function is required here to ensure `k_block` is of type Int<x>.
for_each(make_int_sequence<K_BLOCK_MAX>{}, [&] (auto k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
// Slice the smem_pipe_read smem
tCsA_p = tCsA(_,_,_,smem_pipe_read);
tCsB_p = tCsB(_,_,_,smem_pipe_read);
// Commit the smem for smem_pipe_read
cp_async_wait<DispatchPolicy::Stages-2>();
__syncthreads();
}
// Load A, B shmem->regs for k_block+1
auto k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static
copy(smem_tiled_copy_A, tCsA_p(_,_,k_block_next), tCrA_copy_view(_,_,k_block_next));
copy(smem_tiled_copy_B, tCsB_p(_,_,k_block_next), tCrB_copy_view(_,_,k_block_next));
// Copy gmem to smem before computing gemm on each k-pipe
if (k_block == 0)
{
copy(gmem_tiled_copy_A, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,smem_pipe_write));
copy(gmem_tiled_copy_B, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,smem_pipe_write));
cp_async_fence();
// Advance the tile
--k_tile_count;
if (k_tile_count > 0) { ++k_tile_iter; }
// Advance the pipe -- Doing it here accounts for K_BLOCK_MAX = 1 (no rmem pipe)
smem_pipe_write = smem_pipe_read;
++smem_pipe_read;
smem_pipe_read = (smem_pipe_read == DispatchPolicy::Stages) ? 0 : smem_pipe_read;
}
// Transform before compute
cute::transform(tCrA(_,_,k_block), TransformA{});
cute::transform(tCrB(_,_,k_block), TransformB{});
// Thread-level register gemm for k_block
cute::gemm(tiled_mma, accum, tCrA(_,_,k_block), tCrB(_,_,k_block), src_accum);
});
}
cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
int Stages,
class ClusterShape_,
class TileShape_,
class ElementA_,
class StrideA_,
class ElementB_,
class StrideB_,
class TiledMma_,
class GmemTiledCopyA_,
class SmemLayoutAtomA_,
class SmemCopyAtomA_,
class TransformA_,
class GmemTiledCopyB_,
class SmemLayoutAtomB_,
class SmemCopyAtomB_,
class TransformB_
>
struct CollectiveMma<
MainloopSm80CpAsync<
Stages,
ClusterShape_>,
TileShape_,
ElementA_,
StrideA_,
ElementB_,
StrideB_,
TiledMma_,
GmemTiledCopyA_,
SmemLayoutAtomA_,
SmemCopyAtomA_,
TransformA_,
GmemTiledCopyB_,
SmemLayoutAtomB_,
SmemCopyAtomB_,
TransformB_
>
{
//
// Type Aliases
//
using DispatchPolicy = MainloopSm80CpAsync<
Stages,
ClusterShape_>;
using TileShape = TileShape_;
// Following the change in TestSmall: TileShape switched to CtaShape
// On legacy architectures, the two should be the same
using CtaShape_MNK = TileShape;
using ElementA = ElementA_;
using StrideA = StrideA_;
using ElementB = ElementB_;
using StrideB = StrideB_;
using TiledMma = TiledMma_;
using ElementAccumulator = typename TiledMma::ValTypeC;
using GmemTiledCopyA = GmemTiledCopyA_;
using GmemTiledCopyB = GmemTiledCopyB_;
using SmemLayoutAtomA = SmemLayoutAtomA_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using SmemCopyAtomA = SmemCopyAtomA_;
using SmemCopyAtomB = SmemCopyAtomB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
using ArchTag = typename DispatchPolicy::ArchTag;
static_assert(cute::rank(SmemLayoutAtomA{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<0>(TileShape{}) % size<0>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomA{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert(cute::rank(SmemLayoutAtomB{}) == 2, "SmemLayoutAtom must be rank 2 (M/N, K)");
static_assert((size<1>(TileShape{}) % size<0>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomB{})) == 0, "SmemLayoutAtom must evenly divide tile shape.");
using SmemLayoutA = decltype(tile_to_shape(
SmemLayoutAtomA{},
make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
using SmemLayoutB = decltype(tile_to_shape(
SmemLayoutAtomB{},
make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), Int<DispatchPolicy::Stages>{})));
static_assert(DispatchPolicy::Stages >= 2, "CpAsync mainloop must have at least 2 stages in the pipeline.");
struct SharedStorage
{
cute::array_aligned<ElementA, cute::cosize_v<SmemLayoutA>> smem_a;
cute::array_aligned<ElementB, cute::cosize_v<SmemLayoutB>> smem_b;
};
// Host side kernel arguments
struct Arguments {
ElementA const* ptr_A;
StrideA dA;
ElementB const* ptr_B;
StrideB dB;
};
// Device side kernel params
using Params = Arguments;
//
// Methods
//
CollectiveMma() = default;
template <class ProblemShape>
static constexpr Params
to_underlying_arguments(ProblemShape const& _, Arguments const& args, void* workspace) {
(void) workspace;
return args;
}
/// Perform a collective-scoped matrix multiply-accumulate
template <
class FrgTensorD,
class TensorA,
class TensorB,
class FrgTensorC,
class KTileIterator,
class ResidueMNK
>
CUTLASS_DEVICE void
operator() (
FrgTensorD &accum,
TensorA gA, // (BLK_M, BLK_K, K_TILES)
TensorB gB, // (BLK_N, BLK_K, K_TILES)
FrgTensorC const &src_accum,
KTileIterator k_tile_iter, int k_tile_count,
ResidueMNK residue_mnk,
int thread_idx,
char *smem_buf)
{
using namespace cute;
static_assert(is_rmem<FrgTensorD>::value, "D tensor must be rmem resident.");
static_assert(is_gmem<TensorA>::value, "A tensor must be gmem resident.");
static_assert(is_gmem<TensorB>::value, "B tensor must be gmem resident.");
static_assert(is_rmem<FrgTensorC>::value, "C tensor must be rmem resident.");
static_assert(cute::rank(SmemLayoutA{}) == 3, "Smem layout must be rank 3.");
static_assert(cute::rank(SmemLayoutB{}) == 3, "Smem layout must be rank 3.");
// Construct shared memory tiles
SharedStorage& storage = *reinterpret_cast<SharedStorage*>(smem_buf);
Tensor sA = make_tensor(make_smem_ptr(storage.smem_a.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(storage.smem_b.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
CUTE_STATIC_ASSERT_V(size<0>(gA) == size<0>(sA)); // BLK_M
CUTE_STATIC_ASSERT_V(size<1>(gA) == size<1>(sA)); // BLK_K
CUTE_STATIC_ASSERT_V(size<0>(gB) == size<0>(sB)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(gB) == size<1>(sB)); // BLK_K
CUTE_STATIC_ASSERT_V(size<1>(sA) == size<1>(sB)); // BLK_K
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sA)); // PIPE
CUTE_STATIC_ASSERT_V(Int<DispatchPolicy::Stages>{} == size<2>(sB)); // PIPE
// Shift tensor so residue_k is at origin (Can't read any k_coord < residue_k)
// This aligns the tensor with BLK_K for all but the 0th k_tile
gA = cute::domain_offset(make_coord(0, get<2>(residue_mnk), 0), gA);
gB = cute::domain_offset(make_coord(0, get<2>(residue_mnk), 0), gB);
// Partition the copying of A and B tiles across the threads
GmemTiledCopyA gmem_tiled_copy_A;
GmemTiledCopyB gmem_tiled_copy_B;
auto gmem_thr_copy_A = gmem_tiled_copy_A.get_slice(thread_idx);
auto gmem_thr_copy_B = gmem_tiled_copy_B.get_slice(thread_idx);
Tensor tAgA = gmem_thr_copy_A.partition_S(gA); // (ACPY,ACPY_M,ACPY_K,k)
Tensor tAsA = gmem_thr_copy_A.partition_D(sA); // (ACPY,ACPY_M,ACPY_K,PIPE)
Tensor tBgB = gmem_thr_copy_B.partition_S(gB); // (BCPY,BCPY_N,BCPY_K,k)
Tensor tBsB = gmem_thr_copy_B.partition_D(sB); // (BCPY,BCPY_N,BCPY_K,PIPE)
//
// PREDICATES
//
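// Out-of-bounds handling: tApA / tBpB hold per-thread predicates for the M and N extents,
// computed once from identity tensors over the smem tile shapes. The K residue is handled by
// the domain_offset() shift of gA / gB above, so only the 0th k-tile needs a per-k guard in
// its prefetch loop; smem elements whose loads are predicated off are zero-filled by the
// clear() calls below.
//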
// Allocate predicate tensors for m and n
Tensor tApA = make_tensor<bool>(make_shape(size<1>(tAsA), size<2>(tAsA)), Stride<_1,_0>{});
Tensor tBpB = make_tensor<bool>(make_shape(size<1>(tBsB), size<2>(tBsB)), Stride<_1,_0>{});
// Construct identity layout for sA and sB
Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA))); // (BLK_M,BLK_K) -> (blk_m,blk_k)
Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB))); // (BLK_N,BLK_K) -> (blk_n,blk_k)
// Repeat the partitioning with identity layouts
Tensor tAcA = gmem_thr_copy_A.partition_S(cA); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k)
Tensor tBcB = gmem_thr_copy_B.partition_S(cB); // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k)
// Set predicates for m bounds
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < size<0>(tApA); ++m) {
tApA(m,0) = get<0>(tAcA(0,m,0)) < get<0>(residue_mnk); // blk_m coord < residue_m
}
// Set predicates for n bounds
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < size<0>(tBpB); ++n) {
tBpB(n,0) = get<0>(tBcB(0,n,0)) < get<1>(residue_mnk); // blk_n coord < residue_n
}
//
// PREFETCH
//
// Clear the smem tiles to account for predicated off loads
clear(tAsA);
clear(tBsB);
// Start async loads for 0th k-tile, where we take care of the k residue
{
constexpr int k_pipe = 0;
Tensor tAgAk = tAgA(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tAsA); ++k) {
if (get<1>(tAcA(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gA shifted)
copy_if(gmem_tiled_copy_A, tApA(_,k), tAgAk(_,_,k), tAsA(_,_,k,k_pipe));
}
}
Tensor tBgBk = tBgB(_,_,_,*k_tile_iter);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < size<2>(tBsB); ++k) {
if (get<1>(tBcB(0,0,k)) >= -get<2>(residue_mnk)) { // blk_k coord < residue_k (gB shifted)
copy_if(gmem_tiled_copy_B, tBpB(_,k), tBgBk(_,_,k), tBsB(_,_,k,k_pipe));
}
}
cp_async_fence();
++k_tile_iter;
--k_tile_count;
}
// Start async loads for 1st k-tile onwards, no k-residue handling needed
CUTLASS_PRAGMA_UNROLL
for (int k_pipe = 1; k_pipe < DispatchPolicy::Stages-1; ++k_pipe) {
if (k_tile_count <= 0) {
clear(tApA);
clear(tBpB);
}
copy_if(gmem_tiled_copy_A, tApA, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,k_pipe)); // CpAsync
copy_if(gmem_tiled_copy_B, tBpB, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,k_pipe)); // CpAsync
cp_async_fence();
++k_tile_iter;
--k_tile_count;
}
//
// MMA Atom partitioning
//
// Tile MMA compute thread partitions and allocate accumulators
TiledMma tiled_mma;
auto thr_mma = tiled_mma.get_thread_slice(thread_idx);
Tensor tCrA = thr_mma.partition_fragment_A(sA(_,_,0)); // (MMA,MMA_M,MMA_K)
Tensor tCrB = thr_mma.partition_fragment_B(sB(_,_,0)); // (MMA,MMA_N,MMA_K)
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(src_accum)); // MMA_M
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(src_accum)); // MMA_N
CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K
//
// Copy Atom retiling
//
auto smem_tiled_copy_A = make_tiled_copy_A(SmemCopyAtomA{}, tiled_mma);
auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(thread_idx);
Tensor tCsA = smem_thr_copy_A.partition_S(sA); // (CPY,CPY_M,CPY_K,PIPE)
Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); // (CPY,CPY_M,CPY_K)
CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M
CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K
auto smem_tiled_copy_B = make_tiled_copy_B(SmemCopyAtomB{}, tiled_mma);
auto smem_thr_copy_B = smem_tiled_copy_B.get_thread_slice(thread_idx);
Tensor tCsB = smem_thr_copy_B.partition_S(sB); // (CPY,CPY_N,CPY_K,PIPE)
Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB); // (CPY,CPY_N,CPY_K)
CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // CPY_N
CUTE_STATIC_ASSERT_V(size<2>(tCsB) == size<2>(tCrB_copy_view)); // CPY_K
//
// PIPELINED MAIN LOOP
//
// Current pipe index in smem to read from
int smem_pipe_read = 0;
// Current pipe index in smem to write to
int smem_pipe_write = DispatchPolicy::Stages-1;
Tensor tCsA_p = tCsA(_,_,_,smem_pipe_read);
Tensor tCsB_p = tCsB(_,_,_,smem_pipe_read);
// Size of the register pipeline
auto K_BLOCK_MAX = size<2>(tCrA);
// PREFETCH register pipeline
if (K_BLOCK_MAX > 1) {
// Wait until our first prefetched tile is loaded in
cp_async_wait<DispatchPolicy::Stages-2>();
__syncthreads();
// Prefetch the first rmem from the first k-tile
copy(smem_tiled_copy_A, tCsA_p(_,_,Int<0>{}), tCrA_copy_view(_,_,Int<0>{}));
copy(smem_tiled_copy_B, tCsB_p(_,_,Int<0>{}), tCrB_copy_view(_,_,Int<0>{}));
}
CUTLASS_PRAGMA_NO_UNROLL
for ( ; k_tile_count > -(DispatchPolicy::Stages-1); --k_tile_count)
{
// Pipeline the outer products with a static for loop.
//
// Note, the for_each() function is required here to ensure `k_block` is of type Int<N>.
for_each(make_int_sequence<K_BLOCK_MAX>{}, [&] (auto k_block)
{
if (k_block == K_BLOCK_MAX - 1)
{
// Slice the smem_pipe_read smem
tCsA_p = tCsA(_,_,_,smem_pipe_read);
tCsB_p = tCsB(_,_,_,smem_pipe_read);
// Commit the smem for smem_pipe_read
cp_async_wait<DispatchPolicy::Stages-2>();
__syncthreads();
}
// Load A, B shmem->regs for k_block+1
auto k_block_next = (k_block + Int<1>{}) % K_BLOCK_MAX; // static
copy(smem_tiled_copy_A, tCsA_p(_,_,k_block_next), tCrA_copy_view(_,_,k_block_next));
copy(smem_tiled_copy_B, tCsB_p(_,_,k_block_next), tCrB_copy_view(_,_,k_block_next));
// Copy gmem to smem before computing gemm on each k-pipe
if (k_block == 0)
{
// Set all predicates to false if we are going to overshoot bounds
if (k_tile_count <= 0) {
clear(tApA);
clear(tBpB);
}
copy_if(gmem_tiled_copy_A, tApA, tAgA(_,_,_,*k_tile_iter), tAsA(_,_,_,smem_pipe_write));
copy_if(gmem_tiled_copy_B, tBpB, tBgB(_,_,_,*k_tile_iter), tBsB(_,_,_,smem_pipe_write));
cp_async_fence();
++k_tile_iter;
// Advance the pipe -- Doing it here accounts for K_BLOCK_MAX = 1 (no rmem pipe)
smem_pipe_write = smem_pipe_read;
++smem_pipe_read;
smem_pipe_read = (smem_pipe_read == DispatchPolicy::Stages) ? 0 : smem_pipe_read;
}
// Transform before compute
cute::transform(tCrA(_,_,k_block), TransformA{});
cute::transform(tCrB(_,_,k_block), TransformB{});
// Thread-level register gemm for k_block
cute::gemm(tiled_mma, accum, tCrA(_,_,k_block), tCrB(_,_,k_block), src_accum);
});
}
cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::collective
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/collective/sm80_mma_multistage.hpp/0 | {
"file_path": "include/cutlass/gemm/collective/sm80_mma_multistage.hpp",
"repo_id": "include",
"token_count": 13285
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Template for a device-level pipelined Rank2K update. Supports serial split-K reduction
         when enabled; batched execution is not supported.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/rank_2k_universal.h"
#include "cutlass/gemm/kernel/default_rank_2k_universal.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassTensorOp,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm80,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementA_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
typename threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementA_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementA_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementB_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
/// Operation performed by the Rank2K update kernel
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator,
/// Complex elementwise transformation
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex elementwise transformation
ComplexTransform TransformB = ComplexTransform::kNone,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_ = BlasMode::kSymmetric>
class Rank2K {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static FillMode const kFillModeC = FillModeC;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
static BlasMode const kBlasMode = BlasMode_;
static int const kUpdateRank = 2;
// static asserts for rank 2k update kernel
static_assert(platform::is_same<LayoutA, LayoutB>::value,
"Rank 2K update operator support same layouts for operandA and B");
/// Define the kernel
using Rank2Kkernel = typename kernel::DefaultRank2KUniversal<
ElementA,
LayoutA,
kTransformA,
kAlignmentA,
ElementB,
LayoutB,
kTransformB,
kAlignmentB,
ElementC,
LayoutC,
kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kSplitKSerial,
Operator,
kBlasMode
>::Rank2Kkernel;
using Arguments = typename Rank2Kkernel::Arguments;
private:
/// Kernel parameters object
typename Rank2Kkernel::Params params_;
public:
/// Constructs the Rank2K.
Rank2K() { }
/// Determines whether the Rank2K can execute the given problem.
static Status can_implement(Arguments const &args) {
if (!kSplitKSerial && args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
Status status = Rank2Kkernel::can_implement(args);
if (FillModeC != FillMode::kLower && FillModeC != FillMode::kUpper) {
return Status::kErrorInvalidProblem;
}
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
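// Serial split-K reduction reserves one int per output tile as a semaphore in the workspace.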
if (kSplitKSerial && args.batch_count > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
/// Initializes Rank2K state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (kSplitKSerial) {
if (args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.batch_count > 1) {
return Status::kErrorInvalidProblem;
}
}
int gemm_k_size = args.problem_size.k();
// Initialize the Params structure
params_ = typename Rank2Kkernel::Params{
args,
grid_tiled_shape,
gemm_k_size,
static_cast<int *>(workspace)
};
int smem_size = int(sizeof(typename Rank2Kkernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<Rank2Kkernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.batch_count > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
size_t workspace_bytes = get_workspace_size(args);
if (workspace_bytes && !workspace) {
return Status::kErrorWorkspaceNull;
}
params_.update(args, workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(Rank2Kkernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename Rank2Kkernel::SharedStorage));
cutlass::Kernel<Rank2Kkernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
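//
// Illustrative usage (a minimal sketch; the element types, fill mode, argument list, and
// pointer names below are hypothetical and depend on the problem being solved):
//
//   using Rank2Kop = cutlass::gemm::device::Rank2K<
//     cutlass::half_t, cutlass::layout::ColumnMajor,    // A
//     cutlass::half_t, cutlass::layout::ColumnMajor,    // B
//     float,           cutlass::layout::ColumnMajor,    // C / D
//     cutlass::FillMode::kLower>;
//
//   Rank2Kop rank2k_op;
//   Rank2Kop::Arguments args(/* problem size, pointers, strides, epilogue parameters */);
//   size_t workspace_bytes = Rank2Kop::get_workspace_size(args);
//   // Allocate workspace_bytes of device memory if non-zero, then:
//   cutlass::Status status = rank2k_op(args, workspace_ptr, stream);
//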
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for column-major output that exchanges the A and B operands.
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Access granularity of A matrix in units of elements
int AlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial,
/// Operation performed by Rank2K update kernel
typename Operator_,
/// Complex elementwise transformation
ComplexTransform TransformA,
/// Complex elementwise transformation
ComplexTransform TransformB,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_
>
class Rank2K<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_,
layout::ColumnMajor, // partially specialized on LayoutC
FillModeC, ElementAccumulator_, OperatorClass_, ArchTag_, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB,
SplitKSerial, Operator_, TransformA, TransformB, BlasMode_> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static FillMode const kFillModeC = FillModeC;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static BlasMode const kBlasMode = BlasMode_;
static ComplexTransform const kTransformA = TransformA;
static ComplexTransform const kTransformB = TransformB;
static int const kUpdateRank = 2;
/// Define the kernel
using UnderlyingOperator = typename cutlass::gemm::device::Rank2K<
ElementB,
LayoutB,
ElementA,
LayoutA,
ElementC,
layout::RowMajor,
InvertFillMode<FillModeC>::mode,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kAlignmentB,
kAlignmentA,
kSplitKSerial,
Operator,
kTransformA,
kTransformB,
kBlasMode
>;
/// Argument structure
using Arguments = typename UnderlyingOperator::Arguments;
using Rank2Kkernel = typename UnderlyingOperator::Rank2Kkernel;
private:
UnderlyingOperator underlying_operator_;
public:
/// Constructs the Rank2K.
Rank2K() { }
/// Helper to construct a transposed equivalent for the underlying Rank2K operator
static Arguments to_underlying_arguments(Arguments const &args) {
return args.transposed_problem();
}
/// Determines whether the Rank2K can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Computes the grid shape
static dim3 get_grid_shape(Arguments const &args) {
return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args));
}
/// Computes the maximum number of active blocks per multiprocessor
static int maximum_active_blocks(int smem_capacity = -1) {
return UnderlyingOperator::maximum_active_blocks(smem_capacity);
}
/// Initializes Rank2K state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
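//
// Minimal host-side usage sketch based on the public interface above. The concrete
// template arguments and make_rank2k_arguments() are hypothetical placeholders, not
// part of this header.
//
//   using Rank2K = cutlass::gemm::device::Rank2K<
//       ElementA, LayoutA, ElementB, LayoutB, ElementC,
//       cutlass::layout::ColumnMajor, cutlass::FillMode::kLower, ElementAccumulator>;
//
//   typename Rank2K::Arguments args = make_rank2k_arguments();   // hypothetical helper
//   Rank2K rank2k_op;
//
//   if (rank2k_op.can_implement(args) == cutlass::Status::kSuccess) {
//     void *workspace = nullptr;
//     cudaMalloc(&workspace, Rank2K::get_workspace_size(args));
//     rank2k_op.initialize(args, workspace);
//     rank2k_op();                                               // same as rank2k_op.run()
//     cudaFree(workspace);
//   }
//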
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/rank_2k.h/0 | {
"file_path": "include/cutlass/gemm/device/rank_2k.h",
"repo_id": "include",
"token_count": 6106
} | 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level grouped Rank2K.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/kernel/rank_2k_transpose_operands.h"
#include "cutlass/gemm/kernel/default_rank_2k.h"
#include "cutlass/gemm/kernel/default_rank_2k_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_ = BlasMode::kSymmetric,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly,
///
typename Enable = void
>
struct DefaultRank2KGrouped;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Real-valued grouped Rank2K
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_
>
struct DefaultRank2KGrouped<ElementA, LayoutA, TransformA, kAlignmentA,
ElementB, LayoutB, TransformB, kAlignmentB,
ElementC, LayoutC,
FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape,
WarpShape, InstructionShape, EpilogueOutputOp,
ThreadblockSwizzle, Stages, Operator, BlasMode_, GroupScheduleMode_,
typename platform::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
> {
// If true, we must construct a 'transposed-and-exchanged' Rank2K operator.
static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
using MapArguments = kernel::detail::Rank2KMapArguments<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
LayoutC,
FillModeC,
kInternalTranspose
>;
// Define the default grouped Rank2K kernel
using DefaultRank2Kkernel = typename kernel::DefaultRank2K<
typename MapArguments::ElementA,
typename MapArguments::LayoutA,
MapArguments::kAlignmentA,
typename MapArguments::ElementB,
typename MapArguments::LayoutB,
MapArguments::kAlignmentB,
ElementC,
typename MapArguments::LayoutC,
MapArguments::kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
false, // SplitKSerial
Operator,
BlasMode_
>::Rank2Kkernel;
/// Define the kernel in terms of the default kernel
using Rank2Kkernel = kernel::Rank2KGrouped<
typename DefaultRank2Kkernel::Mma1,
typename DefaultRank2Kkernel::Mma2,
typename DefaultRank2Kkernel::Epilogue,
ThreadblockSwizzle,
TransformA,
TransformB,
DefaultRank2Kkernel::kFillModeC,
DefaultRank2Kkernel::kBlasMode,
GroupScheduleMode_,
kInternalTranspose
>;
};
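//
// Illustrative instantiation of the real-valued specialization above. The concrete
// element types, tile shapes, and functors are example choices, not requirements of
// this header; supported combinations are ultimately determined by DefaultRank2K.
//
//   using DefaultOp = cutlass::gemm::kernel::DefaultRank2KGrouped<
//       cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone, 8,
//       cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::ComplexTransform::kNone, 8,
//       float, cutlass::layout::ColumnMajor, cutlass::FillMode::kLower,
//       float, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 32>,
//       cutlass::gemm::GemmShape<64, 64, 32>,
//       cutlass::gemm::GemmShape<16, 8, 16>,
//       cutlass::epilogue::thread::LinearCombination<float, 4, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       4, cutlass::arch::OpMultiplyAdd>;
//
//   using Rank2Kkernel = typename DefaultOp::Rank2Kkernel;
//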
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex-valued grouped Rank2K
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Complex elementwise transformation on A operand
ComplexTransform TransformA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Complex elementwise transformation on B operand
ComplexTransform TransformB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Fill Mode for C (kLower or kUpper)
FillMode FillModeC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Blas3 computation mode
BlasMode BlasMode_,
/// Whether the schedule of problems to visit has been precomputed
GroupScheduleMode GroupScheduleMode_
>
struct DefaultRank2KGrouped<ElementA, LayoutA, TransformA, kAlignmentA,
ElementB, LayoutB, TransformB, kAlignmentB,
ElementC, LayoutC,
FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape,
WarpShape, InstructionShape, EpilogueOutputOp,
ThreadblockSwizzle, Stages, Operator, BlasMode_, GroupScheduleMode_,
typename platform::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
> {
// If true, we must construct a 'transposed-and-exchanged' Rank2K operator.
static bool const kInternalTranspose = platform::is_same<LayoutC, layout::ColumnMajor>::value;
using MapArguments = kernel::detail::Rank2KMapArguments<
ElementA,
LayoutA,
TransformA,
kAlignmentA,
ElementB,
LayoutB,
TransformB,
kAlignmentB,
LayoutC,
FillModeC,
kInternalTranspose
>;
// Define the default grouped Rank2K kernel
using DefaultRank2Kkernel = typename kernel::DefaultRank2KComplex<
typename MapArguments::ElementA,
typename MapArguments::LayoutA,
typename MapArguments::ElementB,
typename MapArguments::LayoutB,
ElementC,
typename MapArguments::LayoutC,
MapArguments::kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MapArguments::kTransformA,
MapArguments::kTransformB,
Operator,
false, // SplitKSerial
BlasMode_
>::Rank2Kkernel;
/// Define the kernel in terms of the default kernel
/// Pass through the user-provided TransformA and TransformB so as to
/// correctly set public-facing TransformA and TransformB in kernel::Rank2KGrouped.
/// This is needed because kernel::DefaultRank2KComplex may change TransformA and
/// TransformB that become template arguments to Mma1 and Mma2.
using Rank2Kkernel = kernel::Rank2KGrouped<
typename DefaultRank2Kkernel::Mma1,
typename DefaultRank2Kkernel::Mma2,
typename DefaultRank2Kkernel::Epilogue,
ThreadblockSwizzle,
TransformA,
TransformB,
DefaultRank2Kkernel::kFillModeC,
DefaultRank2Kkernel::kBlasMode,
GroupScheduleMode_,
kInternalTranspose
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/default_rank_2k_grouped.h/0 | {
"file_path": "include/cutlass/gemm/kernel/default_rank_2k_grouped.h",
"repo_id": "include",
"token_count": 3963
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Scheduler for grouped GEMM
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/gemm/kernel/grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
// Helper for correctly representing problem sizes in grouped kernels
template <
typename ThreadblockShape,
bool Transposed
>
struct GemmGroupedProblemSizeHelper {
static bool const kTransposed = Transposed;
CUTLASS_HOST_DEVICE
static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) {
return cutlass::gemm::GemmCoord(
((problem.m() - 1 + ThreadblockShape::kM) / ThreadblockShape::kM),
((problem.n() - 1 + ThreadblockShape::kN) / ThreadblockShape::kN),
1);
}
CUTLASS_HOST_DEVICE
static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {
if (kTransposed) {
swap(problem.m(), problem.n());
}
}
CUTLASS_HOST_DEVICE
static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) {
return grid.m() * grid.n();
}
};
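// Worked example (illustrative): with ThreadblockShape = GemmShape<128, 128, 32> and a
// per-group problem of m = 300, n = 520, grid_shape() returns
// ((300 - 1 + 128) / 128, (520 - 1 + 128) / 128, 1) = (3, 5, 1), and tile_count()
// then reports 3 * 5 = 15 threadblock tiles for that group.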
} // namespace detail
/// Visitor class to abstract away the algorithm for iterating over tiles
template <typename ThreadblockShape,
GroupScheduleMode GroupScheduleMode_,
int PrefetchTileCount,
int ThreadCount,
bool Transposed = false>
struct GemmGroupedProblemVisitor : public GroupedProblemVisitor<
detail::GemmGroupedProblemSizeHelper<ThreadblockShape, Transposed>,
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount> {
static bool const kTransposed = Transposed;
using ProblemSizeHelper = detail::GemmGroupedProblemSizeHelper<ThreadblockShape, Transposed>;
using Base = GroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape, GroupScheduleMode_, PrefetchTileCount, ThreadCount>;
using Params = typename Base::Params;
using SharedStorage = typename Base::SharedStorage;
//
// Methods
//
CUTLASS_DEVICE
GemmGroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base (params_, shared_storage_, block_idx)
{}
};
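//
// Sketch of how a grouped GEMM kernel is expected to drive this visitor (the exact
// call sites live in the grouped kernel itself, e.g. gemm_grouped.h; shown here only
// for orientation):
//
//   GemmGroupedProblemVisitor<...> problem_visitor(
//       params.problem_visitor, shared_storage.problem_visitor, blockIdx.x);
//
//   while (problem_visitor.next_tile()) {
//     cutlass::gemm::GemmCoord problem_size = problem_visitor.problem_size();
//     int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
//     // ... run the mainloop and epilogue for this tile ...
//     problem_visitor.advance(gridDim.x);
//   }
//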
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_grouped_problem_visitor.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_grouped_problem_visitor.h",
"repo_id": "include",
"token_count": 1488
} | 30 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Gemm kernel with an epilogue defined under the epilogue visitor concept
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/kernel/gemm_universal.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Gemm that compute the epilogue visitor functor
template <
typename Mma, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
class GemmWithEpilogueVisitor: GemmUniversal<Mma,Epilogue, ThreadblockSwizzle_> {
public:
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Base = GemmUniversal<Mma,Epilogue, ThreadblockSwizzle>;
using Base::Base;
using FusionCallbacks = typename Epilogue::FusionCallbacks;
using ElementA = typename Base::ElementA;
using LayoutA = typename Base::LayoutA;
using ElementB = typename Base::ElementB;
using LayoutB = typename Base::LayoutB;
using ElementC = typename Base::ElementC;
using LayoutC = typename Base::LayoutC;
using ThreadblockShape = typename Mma::Shape;
//
// Structures
//
using SharedStorage = typename Base::SharedStorage;
using Arguments = typename Base::Arguments;
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
//
// Data members
//
cute::Shape<int32_t,int32_t,int32_t> problem_shape;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename FusionCallbacks::Params output_op;
void * ptr_A;
void * ptr_B;
int64_t batch_stride_A;
int64_t batch_stride_B;
int * ptr_gather_A_indices;
int * ptr_gather_B_indices;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a),
params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b),
output_op(FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/)),
problem_shape({args.problem_size.m(), args.problem_size.n(), args.batch_count}),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
ptr_gather_A_indices(const_cast<int *>(args.ptr_gather_A_indices)),
ptr_gather_B_indices(const_cast<int *>(args.ptr_gather_B_indices))
{
// Raise error on unsupported modes
assert(args.mode != GemmUniversalMode::kGemmSplitKParallel && "Sm80 EVT does not support SplitKParallel.");
assert(!(args.mode == GemmUniversalMode::kGemm && this->grid_tiled_shape.k() > 1 )
&& "Sm80 EVT does not support SplitKSerial.");
assert(args.mode != GemmUniversalMode::kArray && "Sm80 EVT does not support Array Gemm.");
}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversalwithVisitor::Params::update()");
// Update input pointers
ptr_A = const_cast<void *>(args.ptr_A);
ptr_B = const_cast<void *>(args.ptr_B);
batch_stride_A = args.batch_stride_A;
batch_stride_B = args.batch_stride_B;
this->batch_stride_D = args.batch_stride_D;
ptr_gather_A_indices = const_cast<int *>(args.ptr_gather_A_indices);
ptr_gather_B_indices = const_cast<int *>(args.ptr_gather_B_indices);
output_op = FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/);
problem_shape = make_shape(args.problem_size.m(), args.problem_size.n(), args.batch_count);
}
};
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmWithEpilogueVisitor op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
ThreadblockSwizzle threadblock_swizzle;
run_with_swizzle(params, shared_storage, threadblock_swizzle);
}
/// Executes one GEMM with an externally-provided swizzling function
CUTLASS_DEVICE
void run_with_swizzle(Params const ¶ms, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) {
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
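    // Illustrative example of the mode handling above: for kGemm with K = 4096,
    // gemm_k_size = 1024 and grid_tiled_shape.k() = 4, the CTA at k-index 1 computes
    // offset_k = 1024 and problem_size_k = 2048, so its mainloop covers the K range
    // [1024, 2048). For kBatched, the k-index instead selects the batch by advancing
    // ptr_A / ptr_B by the batch strides.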
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A,
params.ptr_gather_A_indices);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B,
params.ptr_gather_B_indices);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Epilogue
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
Epilogue epilogue(
params.output_op,
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(accumulators, threadblock_tile_offset, params.problem_shape, thread_idx);
}
};
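//
// Sketch of a global entry point driving this kernel, based on the invoke() interface
// above (the kernel name and surrounding launch machinery are assumptions, not part of
// this header):
//
//   template <typename GemmKernel>
//   __global__ void gemm_with_visitor_entry(typename GemmKernel::Params params) {
//     extern __shared__ char smem[];
//     auto &shared_storage = *reinterpret_cast<typename GemmKernel::SharedStorage *>(smem);
//     GemmKernel::invoke(params, shared_storage);
//   }
//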
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_universal_with_visitor.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_universal_with_visitor.h",
"repo_id": "include",
"token_count": 3758
} | 31 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/workspace.h"
#include "cutlass/fast_math.h"
#include "cutlass/kernel_hardware_info.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/arch/reg_reconfig.h"
#include "cutlass/arch/mma_sm90.h"
#include "cutlass/epilogue/collective/detail.hpp"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/kernel/gemm_universal_decl.h"
#include "cutlass/gemm/kernel/tile_scheduler.hpp"
#include "cutlass/gemm/group_array_problem_shape.hpp"
#include "cutlass/pipeline/pipeline.hpp"
#include "cute/tensor.hpp"
#include "cutlass/trace.h"
///////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::kernel {
///////////////////////////////////////////////////////////////////////////////
template <
class ProblemShape_,
class CollectiveMainloop_,
class CollectiveEpilogue_,
class TileScheduler_
>
class GemmUniversal<
ProblemShape_,
CollectiveMainloop_,
CollectiveEpilogue_,
TileScheduler_,
cute::enable_if_t<cute::is_base_of_v<KernelPtrArrayTmaWarpSpecializedCooperative, typename CollectiveMainloop_::DispatchPolicy::Schedule>>
>
{
public:
//
// Type Aliases
//
using ProblemShape = ProblemShape_;
static_assert(rank(typename ProblemShape::UnderlyingProblemShape{}) == 3 or rank(typename ProblemShape::UnderlyingProblemShape{}) == 4,
"ProblemShape{} should be <M,N,K> or <M,N,K,L>");
// Mainloop derived types
using CollectiveMainloop = CollectiveMainloop_;
using TileShape = typename CollectiveMainloop::TileShape;
using TiledMma = typename CollectiveMainloop::TiledMma;
using ArchTag = typename CollectiveMainloop::ArchTag;
using ElementA = typename CollectiveMainloop::ElementA;
using StrideA = typename CollectiveMainloop::StrideA;
using InternalStrideA = typename CollectiveMainloop::InternalStrideA;
using ElementB = typename CollectiveMainloop::ElementB;
using InternalStrideB = typename CollectiveMainloop::InternalStrideB;
using StrideB = typename CollectiveMainloop::StrideB;
using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy;
using Schedule = typename DispatchPolicy::Schedule;
using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator;
using ClusterShape = typename DispatchPolicy::ClusterShape;
using MainloopArguments = typename CollectiveMainloop::Arguments;
using MainloopParams = typename CollectiveMainloop::Params;
// Epilogue derived types
using CollectiveEpilogue = CollectiveEpilogue_;
using ElementC = typename CollectiveEpilogue::ElementC;
using StrideC = typename CollectiveEpilogue::StrideC;
using InternalStrideC = typename CollectiveEpilogue::InternalStrideC;
using ElementD = typename CollectiveEpilogue::ElementD;
using StrideD = typename CollectiveEpilogue::StrideD;
using InternalStrideD = typename CollectiveEpilogue::InternalStrideD;
using EpilogueArguments = typename CollectiveEpilogue::Arguments;
using EpilogueParams = typename CollectiveEpilogue::Params;
static_assert(ArchTag::kMinComputeCapability >= 90);
static_assert(cute::is_void_v<TileScheduler_>,
"Ptr-Array Cooperative and Grouped Gemm Cooperative kernel only supports the default scheduler.");
static constexpr bool IsGroupedGemmKernel = !cute::is_same_v<InternalStrideA, StrideA>;
using TileScheduler = cute::conditional_t<IsGroupedGemmKernel,
typename detail::TileSchedulerSelector<
GroupScheduler, ArchTag,
TileShape, ClusterShape,
ProblemShape>::Scheduler,
typename detail::TileSchedulerSelector<
void, ArchTag, TileShape, ClusterShape>::Scheduler>;
using TileSchedulerArguments = typename TileScheduler::Arguments;
using TileSchedulerParams = typename TileScheduler::Params;
static constexpr uint32_t NumLoadWarpGroups = 1;
static constexpr uint32_t NumMmaWarpGroups = CUTE_STATIC_V(size(TiledMma{})) / NumThreadsPerWarpGroup;
static constexpr uint32_t MaxThreadsPerBlock = CUTE_STATIC_V(size(TiledMma{})) + (NumLoadWarpGroups * NumThreadsPerWarpGroup);
static constexpr uint32_t MinBlocksPerMultiprocessor = 1;
/// Register requirement for Load and Math WGs
static constexpr uint32_t LoadRegisterRequirement = 40;
static constexpr uint32_t MmaRegisterRequirement = 232;
// 1 stage ordered sequence between mainloop and epilogue producer load threads
using LoadWarpOrderBarrier = cutlass::OrderedSequenceBarrier<1,2>;
// Kernel level shared memory storage
struct SharedStorage {
struct TensorStorage : cute::aligned_struct<128> {
using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage;
using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage;
MainloopTensorStorage mainloop;
EpilogueTensorStorage epilogue;
} tensors;
struct PipelineStorage : cute::aligned_struct<16> {
using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage;
using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage;
alignas(16) MainloopPipelineStorage mainloop;
alignas(16) EpiLoadPipelineStorage epi_load;
alignas(16) typename LoadWarpOrderBarrier::SharedStorage load_order;
} pipelines;
struct TensorMapStorage : cute::aligned_struct<128> {
using MainloopTensorMapStorage = typename CollectiveMainloop::TensorMapStorage;
using EpilogueTensorMapStorage = typename CollectiveEpilogue::TensorMapStorage;
alignas(128) MainloopTensorMapStorage mainloop;
alignas(128) EpilogueTensorMapStorage epilogue;
} tensormaps;
};
static constexpr int SharedStorageSize = sizeof(SharedStorage);
// Device side arguments
struct Arguments {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopArguments mainloop{};
EpilogueArguments epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerArguments scheduler{};
};
// Kernel entry point API
struct Params {
GemmUniversalMode mode{};
ProblemShape problem_shape{};
MainloopParams mainloop{};
EpilogueParams epilogue{};
KernelHardwareInfo hw_info{};
TileSchedulerParams scheduler{};
void* workspace{nullptr};
};
//
// Methods
//
// Convert to underlying arguments. In this case, a simple copy for the aliased type.
static
Params
to_underlying_arguments(Arguments const& args, void* workspace) {
CUTLASS_TRACE_HOST("to_underlying_arguments():");
ProblemShape problem_shapes = args.problem_shape;
// Get SM count if needed, otherwise use user supplied SM count
int sm_count = args.hw_info.sm_count;
if (sm_count <= 0) {
CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n"
" For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count.");
sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id);
}
CUTLASS_TRACE_HOST("to_underlying_arguments(): Setting persistent grid SM count to " << sm_count);
KernelHardwareInfo hw_info{args.hw_info.device_id, sm_count};
// Calculate workspace pointers
uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace);
size_t workspace_offset = 0;
void* scheduler_workspace = workspace_ptr;
workspace_offset += TileScheduler::template get_workspace_size<typename ProblemShape::UnderlyingProblemShape, ElementAccumulator>(
args.scheduler, typename ProblemShape::UnderlyingProblemShape{}, args.hw_info, NumMmaWarpGroups);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
void* epilogue_workspace = workspace_ptr + workspace_offset;
workspace_offset += CollectiveEpilogue::get_workspace_size(problem_shapes, args.epilogue, args.hw_info.sm_count);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
void* mainloop_workspace = workspace_ptr + workspace_offset;
workspace_offset += CollectiveMainloop::get_workspace_size(problem_shapes, args.mainloop, args.hw_info.sm_count);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
    // Precompute the number of epilogue subtiles and pass it to the tile scheduler, where it is
    // used by the separate-reduction scheme in the stream-K case. NumEpilogueSubTiles defaults to 1,
    // meaning subtiles are not used and separate reduction is therefore not enabled.
constexpr uint32_t NumEpilogueSubTiles = CollectiveEpilogue::get_store_pipe_increment(TileShape{});
TileSchedulerParams scheduler;
if constexpr (IsGroupedGemmKernel) {
scheduler = TileScheduler::to_underlying_arguments(
problem_shapes, TileShape{}, ClusterShape{}, hw_info, args.scheduler, scheduler_workspace, NumEpilogueSubTiles);
}
else {
scheduler = TileScheduler::to_underlying_arguments(
problem_shapes.get_host_problem_shape(), TileShape{}, ClusterShape{}, hw_info, args.scheduler, scheduler_workspace, NumEpilogueSubTiles);
}
return {
args.mode,
problem_shapes,
CollectiveMainloop::to_underlying_arguments(problem_shapes, args.mainloop, mainloop_workspace),
CollectiveEpilogue::to_underlying_arguments(problem_shapes, args.epilogue, epilogue_workspace),
hw_info,
scheduler,
workspace
};
}
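  // Illustrative workspace layout implied by the offsets computed above, with each
  // region start rounded up to MinWorkspaceAlignment:
  //
  //   [ tile-scheduler workspace | epilogue workspace | mainloop workspace ]
  //   ^ workspace_ptr
  //
  // get_workspace_size() and initialize_workspace() below walk the same three regions
  // in the same order.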
static bool
can_implement(Arguments const& args) {
bool implementable = true;
if constexpr (IsGroupedGemmKernel) {
// Group GEMM currently only supports rank-3 problem shapes
implementable &= (args.mode == GemmUniversalMode::kGrouped && rank(typename ProblemShape::UnderlyingProblemShape{}) == 3);
} else {
implementable &= (args.mode == GemmUniversalMode::kArray && rank(typename ProblemShape::UnderlyingProblemShape{}) == 4);
}
if (!implementable) {
CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements for Ptr Array Gemm or Grouped Gemm.\n");
return implementable;
}
implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop);
implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue);
implementable &= TileScheduler::can_implement(args.scheduler);
return implementable;
}
static size_t
get_workspace_size(Arguments const& args) {
size_t workspace_size = 0;
constexpr uint32_t NumEpilogueSubTiles = CollectiveEpilogue::get_store_pipe_increment(TileShape{});
workspace_size += TileScheduler::template get_workspace_size<typename ProblemShape::UnderlyingProblemShape, ElementAccumulator>(
args.scheduler, typename ProblemShape::UnderlyingProblemShape{}, args.hw_info, NumMmaWarpGroups, NumEpilogueSubTiles);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
workspace_size += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue, args.hw_info.sm_count);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
// Get SM count if needed, otherwise use user supplied SM count
int sm_count = args.hw_info.sm_count;
if (sm_count <= 0) {
CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n"
" For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count.");
sm_count = KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id);
}
workspace_size += CollectiveMainloop::get_workspace_size(args.problem_shape, args.mainloop, sm_count);
workspace_size = round_nearest(workspace_size, MinWorkspaceAlignment);
return workspace_size;
}
static cutlass::Status
initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr,
CudaHostAdapter* cuda_adapter = nullptr) {
Status status = Status::kSuccess;
uint8_t* workspace_ptr = reinterpret_cast<uint8_t*>(workspace);
size_t workspace_offset = 0;
constexpr uint32_t NumEpilogueSubTiles = CollectiveEpilogue::get_store_pipe_increment(TileShape{});
status = TileScheduler::template initialize_workspace<typename ProblemShape::UnderlyingProblemShape, ElementAccumulator>(
args.scheduler, workspace_ptr + workspace_offset, stream, typename ProblemShape::UnderlyingProblemShape{}, args.hw_info, NumMmaWarpGroups, NumEpilogueSubTiles, cuda_adapter);
workspace_offset += TileScheduler::template get_workspace_size<typename ProblemShape::UnderlyingProblemShape, ElementAccumulator>(
args.scheduler, typename ProblemShape::UnderlyingProblemShape{}, args.hw_info, NumMmaWarpGroups, NumEpilogueSubTiles);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
status = CollectiveEpilogue::initialize_workspace(args.problem_shape, args.epilogue, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += CollectiveEpilogue::get_workspace_size(args.problem_shape, args.epilogue, args.hw_info.sm_count);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
status = CollectiveMainloop::initialize_workspace(args.problem_shape, args.mainloop, workspace_ptr + workspace_offset, stream, cuda_adapter);
workspace_offset += CollectiveMainloop::get_workspace_size(args.problem_shape, args.mainloop, args.hw_info.sm_count);
workspace_offset = round_nearest(workspace_offset, MinWorkspaceAlignment);
if (status != Status::kSuccess) {
return status;
}
return status;
}
// Computes the kernel launch grid shape based on runtime parameters
static dim3
get_grid_shape(Params const& params) {
// Given device SM count, set grid size s.t. we do not launch more thread blocks than we can run concurrently
TileSchedulerArguments args{};
if constexpr (!std::is_const_v<decltype(args.max_swizzle_size)>) {
args.max_swizzle_size = 1 << params.scheduler.log_swizzle_size_;
}
args.raster_order = params.scheduler.raster_order_ == TileScheduler::RasterOrder::AlongN ? TileScheduler::RasterOrderOptions::AlongN : TileScheduler::RasterOrderOptions::AlongM;
dim3 grid_shape;
if constexpr (IsGroupedGemmKernel) {
grid_shape = TileScheduler::get_grid_shape(params.problem_shape, TileShape{}, ClusterShape{}, params.hw_info, args);
}
else {
grid_shape = TileScheduler::get_grid_shape(params.problem_shape.get_host_problem_shape(), TileShape{}, ClusterShape{}, params.hw_info, args);
}
return grid_shape;
}
static dim3
get_block_shape() {
return dim3(MaxThreadsPerBlock, 1, 1);
}
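  // Simplified launch sketch based on the shape queries above. The real device-side
  // adapter additionally configures the thread block cluster dimensions and the
  // dynamic shared memory attribute, which this sketch omits (shown under that
  // assumption):
  //
  //   dim3 grid = GemmKernel::get_grid_shape(params);
  //   dim3 block = GemmKernel::get_block_shape();
  //   int smem_size = GemmKernel::SharedStorageSize;
  //   cutlass::device_kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params);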
CUTLASS_DEVICE
void
operator()(Params const& params, char* smem_buf) {
using namespace cute;
using X = Underscore;
// Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a.
#if ! defined(__CUDA_ARCH_FEAT_SM90_ALL)
printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n");
#else
// Preconditions
static_assert(size(TiledMma{}) == 256, "Cooperative kernel must have TiledMMA operating using 256 threads.");
static_assert(size<0>(TileShape{}) >= 128,
"Cooperative kernel requires Tile Size to be greater than or equal to 128 along the M-dimension.");
static_assert(cute::rank(InternalStrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(InternalStrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(InternalStrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
static_assert(cute::rank(InternalStrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>.");
/* In the Cooperative kernel, Consumer0 and Consumer1 collaborate on the same tile */
enum class WarpGroupRole {
Producer = 0,
Consumer0 = 1,
Consumer1 = 2
};
enum class ProducerWarpRole {
Mainloop = 0,
Warp1 = 1,
Epilogue = 2,
Warp3 = 3
};
// Kernel level shared memory storage
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(smem_buf);
int thread_idx = int(threadIdx.x);
int lane_idx = canonical_lane_idx();
int warp_idx = canonical_warp_idx_sync();
int warp_idx_in_warp_group = warp_idx % NumWarpsPerWarpGroup;
int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup;
int mma_thread_idx = thread_idx % size(TiledMma{});
auto warp_group_role = WarpGroupRole(canonical_warp_group_idx());
auto producer_warp_role = ProducerWarpRole(warp_idx_in_warp_group);
int lane_predicate = cute::elect_one_sync();
uint32_t block_rank_in_cluster = cute::block_rank_in_cluster();
// Note: Tma Descriptor Prefetch (from either const or param) is not applicable here
// Mainloop Load pipeline
using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline;
typename MainloopPipeline::Params mainloop_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Mainloop) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) {
mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer;
}
mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0;
mainloop_pipeline_params.num_consumers = size(TiledMma{});
mainloop_pipeline_params.transaction_bytes = params.mainloop.tma_transaction_bytes;
MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params, ClusterShape{});
// Epilogue Load pipeline
using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline;
typename EpiLoadPipeline::Params epi_load_pipeline_params;
if (warp_group_role == WarpGroupRole::Producer && producer_warp_role == ProducerWarpRole::Epilogue) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer;
}
if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) {
epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer;
}
epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster();
epi_load_pipeline_params.producer_arv_count = NumThreadsPerWarp;
epi_load_pipeline_params.consumer_arv_count = size(TiledMma{});
if constexpr (CollectiveEpilogue::RequiresTransactionBytes) {
epi_load_pipeline_params.transaction_bytes = params.epilogue.tma_transaction_bytes;
}
EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params);
// Epilogue Store pipeline
using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline;
typename EpiStorePipeline::Params epi_store_pipeline_params;
epi_store_pipeline_params.always_wait = true;
EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params);
typename LoadWarpOrderBarrier::Params params_load_order_barrier;
params_load_order_barrier.group_id = producer_warp_role == ProducerWarpRole::Mainloop ? 0 : 1;
params_load_order_barrier.group_size = NumThreadsPerWarp;
LoadWarpOrderBarrier load_order_barrier(shared_storage.pipelines.load_order, params_load_order_barrier);
// Initialize starting pipeline states for the collectives
// Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding)
typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state;
typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state;
// For the DMA Load (producer) we start with an opposite phase
// i.e., we skip all waits since we know that the buffer is indeed empty
PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state<MainloopPipeline>();
PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state<EpiLoadPipeline>();
PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state<EpiStorePipeline>();
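    // Note: make_producer_start_state flips the initial phase bit relative to the
    // consumer-side states above, so the producer's first acquire succeeds immediately
    // on the known-empty buffers instead of waiting.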
auto cluster_wait_fn = [] () {
// We need this to guarantee that the Pipeline init is visible
// To all producers and consumer thread blocks in the Cluster
if constexpr (size(ClusterShape{}) > 1) {
cute::cluster_arrive_relaxed();
return [] () { cute::cluster_wait(); };
}
else {
__syncthreads();
return [] () {}; // do nothing
}
} ();
// Get the appropriate blocks for this thread block -- potential for thread block locality
TiledMma tiled_mma;
auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K)
TileScheduler scheduler{params.scheduler};
// In a warp specialized kernel, collectives expose data movement and compute operations separately
CollectiveMainloop collective_mainloop;
CollectiveEpilogue collective_epilogue(params.epilogue, shared_storage.tensors.epilogue);
// Wait for all thread blocks in the Cluster
cluster_wait_fn();
auto work_tile_info = scheduler.initial_work_tile_info(ClusterShape{});
if (not work_tile_info.is_valid()) {
// When problem shapes are only on device, the grid launched may be larger than the total number of blocks across groups
return;
}
// Optionally append 1s until problem shape is rank-4 in case it is only rank-3 (MNK)
auto problem_shape_MNKL = append<4>(params.problem_shape.get_problem_shape(work_tile_info.L_idx), Int<1>{});
// Prepare and partition the input tensors. Expects a tuple of tensors where:
// get<0>(load_inputs) is the tma tensor A after local tiling so that it has shape (BLK_M,BLK_K,m,k,l)
// get<1>(load_inputs) is the tma tensor B after local tiling so that it has shape (BLK_N,BLK_K,n,k,l)
auto load_inputs = collective_mainloop.load_init(problem_shape_MNKL, params.mainloop);
static_assert(cute::tuple_size_v<decltype(load_inputs)> >= 2, "Output of load_init must have at least two elements (A, B)");
// Extract out partitioned A and B.
Tensor gA_mkl = get<0>(load_inputs);
Tensor gB_nkl = get<1>(load_inputs);
// Get pipeline stage increments from tensor shapes
auto k_tile_count = size<3>(gA_mkl);
if (warp_group_role == WarpGroupRole::Producer) {
cutlass::arch::warpgroup_reg_dealloc<LoadRegisterRequirement>();
// Mainloop Producer Warp
if (producer_warp_role == ProducerWarpRole::Mainloop) {
int32_t curr_batch = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); // Usually just returns work_tile_info.L_idx;
int32_t const mock_l_coord = 0;
int32_t const sm_idx = blockIdx.x + (blockIdx.y * gridDim.x);
int32_t const sm_count = params.hw_info.sm_count;
// Fetch a copy of tensormaps for the CTA
auto input_tensormaps = collective_mainloop.tensormaps_init(params.mainloop, sm_count, sm_idx);
// Update tensormap for the initial batch for the CTA
if (work_tile_info.is_valid()) {
collective_mainloop.tensormaps_perform_update(
shared_storage.tensormaps.mainloop,
params.mainloop,
input_tensormaps,
problem_shape_MNKL,
curr_batch
);
// Ensure warp is converged before issuing tensormap fence release
__syncwarp();
// Entire warp must do this (i.e. it's aligned)
collective_mainloop.tensormaps_cp_fence_release(shared_storage.tensormaps.mainloop, input_tensormaps);
}
bool do_load_order_arrive = true;
bool did_batch_change = true;
while (work_tile_info.is_valid()) {
if (!TileScheduler::valid_warpgroup_in_work_tile(work_tile_info)) {
work_tile_info = scheduler.fetch_next_work(work_tile_info);
continue;
}
// Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape
auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl));
auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, mock_l_coord);
// Get the number of K tiles to compute for this work as well as the starting K tile offset of the work.
auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape);
auto work_k_tile_start = TileScheduler::get_work_k_tile_start(work_tile_info);
auto k_tile_iter = cute::make_coord_iterator(idx2crd(work_k_tile_start, shape<3>(gA_mkl)), shape<3>(gA_mkl));
if (did_batch_change) {
collective_mainloop.tensormaps_fence_acquire(input_tensormaps);
}
collective_mainloop.load(
params.mainloop,
mainloop_pipeline,
mainloop_pipe_producer_state,
load_inputs,
input_tensormaps,
blk_coord,
k_tile_iter, work_k_tile_count,
lane_idx,
block_rank_in_cluster,
shared_storage.tensors.mainloop
);
// Update starting pipeline state for the next tile
// Wait for the last TMA stage to complete loading, before issuing tensormap updates
mainloop_pipe_producer_state.advance(work_k_tile_count - 1);
// Signal for the epilogue load warp to begin
if (do_load_order_arrive) {
load_order_barrier.arrive();
do_load_order_arrive = false;
}
// Get next work tile
work_tile_info = scheduler.fetch_next_work(work_tile_info);
auto next_batch = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl)); // Usually just returns work_tile_info.L_idx
did_batch_change = next_batch != curr_batch;
if (work_tile_info.is_valid() && did_batch_change) {
curr_batch = next_batch;
if constexpr (IsGroupedGemmKernel) {
problem_shape_MNKL = append<4>(params.problem_shape.get_problem_shape(curr_batch), Int<1>{});
}
// Purpose of this pipeline state is to make sure TMA loads have finished before doing descriptor updates
// Since this state is waiting for loads to finish, it must start in the inverted phase.
typename CollectiveMainloop::PipelineState mainloop_pipe_tma_consumer_state =
{mainloop_pipe_producer_state.index(), !mainloop_pipe_producer_state.phase(), mainloop_pipe_producer_state.count()};
mainloop_pipeline.consumer_wait(mainloop_pipe_tma_consumer_state);
collective_mainloop.tensormaps_perform_update(
shared_storage.tensormaps.mainloop,
params.mainloop,
input_tensormaps,
problem_shape_MNKL,
curr_batch
);
// Ensure warp is converged before issuing tensor replace
__syncwarp();
// Entire warp must do this (i.e. it's aligned)
collective_mainloop.tensormaps_cp_fence_release(shared_storage.tensormaps.mainloop, input_tensormaps);
}
// Advance the producer state for the last remaining stage that was being waited for above
mainloop_pipe_producer_state.advance(1);
} // Scheduler work fetch loop
// Make sure all Consumer Warp Groups have been waited upon
collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state);
} // Mainloop Producer Warp End
// Epilogue Producer Warp
else if (producer_warp_role == ProducerWarpRole::Epilogue && collective_epilogue.is_producer_load_needed()) {
int32_t const sm_idx = blockIdx.x + (blockIdx.y * gridDim.x);
int32_t const sm_count = params.hw_info.sm_count;
auto epi_load_tensormap = get<0>(collective_epilogue.load_init(params.epilogue, sm_count, sm_idx));
bool did_batch_change = true;
constexpr bool IsEpiLoad = true;
if (work_tile_info.is_valid()) {
collective_epilogue.tensormaps_perform_update<IsEpiLoad>(
shared_storage.tensormaps.epilogue,
params.epilogue,
epi_load_tensormap,
work_tile_info.L_idx
);
// Converge before issuing tensormap fence release since fence is aligned
__syncwarp();
collective_epilogue.tensormaps_cp_fence_release<IsEpiLoad>(shared_storage.tensormaps.epilogue, epi_load_tensormap, lane_predicate);
}
load_order_barrier.wait();
while (work_tile_info.is_valid()) {
int32_t curr_batch = work_tile_info.L_idx;
bool compute_epilogue = TileScheduler::compute_epilogue(work_tile_info, params.scheduler);
if (compute_epilogue) {
if constexpr (IsGroupedGemmKernel) {
problem_shape_MNKL = append<4>(params.problem_shape.get_problem_shape(work_tile_info.L_idx), Int<1>{});
}
// Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape
auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl));
auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl));
auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, l_coord);
if (did_batch_change) {
collective_epilogue.tensormaps_fence_acquire<IsEpiLoad>(epi_load_tensormap);
}
epi_load_pipe_producer_state = collective_epilogue.load(
epi_load_pipeline,
epi_load_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
tiled_mma,
lane_idx,
shared_storage.tensors.epilogue,
epi_load_tensormap,
work_tile_info.reduction_subtile_idx(),
true // return state prior to last advance
);
}
// Get next work tile
work_tile_info = scheduler.fetch_next_work(work_tile_info);
did_batch_change = curr_batch != work_tile_info.L_idx;
if (work_tile_info.is_valid() && did_batch_change) {
// Wait for TMA load to finish before updating
typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_tma_consumer_state =
{epi_load_pipe_producer_state.index(), !epi_load_pipe_producer_state.phase(), epi_load_pipe_producer_state.count()};
epi_load_pipeline.consumer_wait(epi_load_pipe_tma_consumer_state);
collective_epilogue.tensormaps_perform_update<IsEpiLoad>(
shared_storage.tensormaps.epilogue,
params.epilogue,
epi_load_tensormap,
work_tile_info.L_idx
);
// Converge before issuing tensormap fence release since fence is aligned
__syncwarp();
collective_epilogue.tensormaps_cp_fence_release<IsEpiLoad>(shared_storage.tensormaps.epilogue, epi_load_tensormap, lane_predicate);
}
if(compute_epilogue) {
epi_load_pipe_producer_state.advance(1);
}
} // Scheduler work fetch loop
// Make sure all Consumer Warp Groups have been waited upon
collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state);
} // Epilogue Producer Warp End
} // Producer Warp Group End
else if (warp_group_role == WarpGroupRole::Consumer0 || warp_group_role == WarpGroupRole::Consumer1) {
cutlass::arch::warpgroup_reg_alloc<MmaRegisterRequirement>();
int32_t const sm_idx = blockIdx.x + (blockIdx.y * gridDim.x);
int32_t const sm_count = params.hw_info.sm_count;
// Do we potentially issue tail arrives for TMA stores, if epilogue load is waiting for it
bool do_store_tail = false;
// Get a copy of tensormaps
auto epi_store_tensormap = get<0>(collective_epilogue.store_init(params.epilogue, sm_count, sm_idx));
bool did_batch_change = true;
constexpr bool IsEpiLoad = false;
if (work_tile_info.is_valid()) {
collective_epilogue.tensormaps_perform_update<IsEpiLoad>(
shared_storage.tensormaps.epilogue,
params.epilogue,
epi_store_tensormap,
work_tile_info.L_idx
);
// Converge before issuing tensormap fence release since fence is aligned
__syncwarp();
collective_epilogue.tensormaps_cp_fence_release<IsEpiLoad>(shared_storage.tensormaps.epilogue, epi_store_tensormap, lane_predicate);
}
while (work_tile_info.is_valid()) {
if constexpr (IsGroupedGemmKernel) {
problem_shape_MNKL = append<4>(params.problem_shape.get_problem_shape(work_tile_info.L_idx), Int<1>{});
}
int32_t curr_batch = work_tile_info.L_idx;
// Compute m_coord, n_coord, l_coord with the post-tiled m-shape and n-shape
auto m_coord = idx2crd(work_tile_info.M_idx, shape<2>(gA_mkl));
auto n_coord = idx2crd(work_tile_info.N_idx, shape<2>(gB_nkl));
auto l_coord = idx2crd(work_tile_info.L_idx, shape<4>(gB_nkl));
auto blk_coord = make_coord(m_coord, n_coord, _, l_coord);
auto work_k_tile_count = TileScheduler::get_work_k_tile_count(work_tile_info, problem_shape_MNKL, blk_shape);
// Allocate the accumulators for the (M,N) blk_shape
//
// MSVC CTAD breaks if we say "Tensor" here, so we use "auto" instead.
auto accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N)
if(TileScheduler::valid_warpgroup_in_work_tile(work_tile_info)) {
collective_mainloop.mma(
mainloop_pipeline,
mainloop_pipe_consumer_state,
accumulators,
work_k_tile_count,
mma_thread_idx,
shared_storage.tensors.mainloop,
params.mainloop
);
// Make sure the math instructions are done and free buffers before entering the epilogue
collective_mainloop.mma_tail(
mainloop_pipeline,
mainloop_pipe_consumer_state,
work_k_tile_count
);
// Update starting mainloop pipeline state for the next tile
mainloop_pipe_consumer_state.advance(work_k_tile_count);
}
// Index of warp group within consumer warp groups
int consumer_warp_group_idx = canonical_warp_group_idx() - NumLoadWarpGroups;
// Perform reduction across splits, if needed
TileScheduler::fixup(
params.scheduler, work_tile_info, accumulators, NumMmaWarpGroups, consumer_warp_group_idx);
if (TileScheduler::compute_epilogue(work_tile_info, params.scheduler)) {
if (did_batch_change) {
collective_epilogue.tensormaps_fence_acquire<IsEpiLoad>(epi_store_tensormap);
}
// Epilogue and write to gD
auto [epi_load_pipe_consumer_state_next, epi_store_pipe_producer_state_next] =
collective_epilogue.store(
epi_load_pipeline,
epi_load_pipe_consumer_state,
epi_store_pipeline,
epi_store_pipe_producer_state,
problem_shape_MNKL,
blk_shape,
blk_coord,
accumulators,
tiled_mma,
mma_thread_idx,
shared_storage.tensors.epilogue,
epi_store_tensormap,
work_tile_info.reduction_subtile_idx()
);
epi_load_pipe_consumer_state = epi_load_pipe_consumer_state_next;
epi_store_pipe_producer_state = epi_store_pipe_producer_state_next;
do_store_tail = true;
}
// Get next work tile
work_tile_info = scheduler.fetch_next_work(work_tile_info);
did_batch_change = curr_batch != work_tile_info.L_idx;
if (work_tile_info.is_valid() && did_batch_change) {
collective_epilogue.tensormaps_perform_update<IsEpiLoad>(
shared_storage.tensormaps.epilogue,
params.epilogue,
epi_store_tensormap,
work_tile_info.L_idx
);
// Converge before issuing tensormap fence release since fence is aligned
__syncwarp();
collective_epilogue.tensormaps_cp_fence_release<IsEpiLoad>(shared_storage.tensormaps.epilogue, epi_store_tensormap, lane_predicate);
}
} // Scheduler work fetch loop
if (do_store_tail) {
collective_epilogue.store_tail(
epi_load_pipeline,
epi_load_pipe_consumer_state,
epi_store_pipeline,
epi_store_pipe_producer_state
);
}
} // Consumer Warp Groups End
#endif
}
};
///////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::kernel
| include/cutlass/gemm/kernel/sm90_gemm_array_tma_warpspecialized_cooperative.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/sm90_gemm_array_tma_warpspecialized_cooperative.hpp",
"repo_id": "include",
"token_count": 15208
} | 32 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/*! \file
\brief Utilities for selecting default tile schedulers
*/
#include "cutlass/arch/arch.h"
#include "cutlass/detail/dependent_false.hpp"
#include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp"
#include "cutlass/gemm/kernel/sm90_tile_scheduler_stream_k.hpp"
#include "cutlass/gemm/kernel/sm90_tile_scheduler_group.hpp"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm {
////////////////////////////////////////////////////////////////////////////////
//
// Tags for specifying tile schedulers
//
struct PersistentScheduler { };
struct StreamKScheduler { };
struct GroupScheduler { }; // Only used for Grouped GEMMs
////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm
////////////////////////////////////////////////////////////////////////////////
namespace cutlass::gemm::kernel::detail {
//
// Selectors mapping tile scheduler tag and arch tag to a tile scheduler class
//
template <
class TileSchedulerTag,
class ArchTag,
class TileShape,
class ClusterShape
, class ProblemShapeType = void
>
struct TileSchedulerSelector {
static_assert(cutlass::detail::dependent_false<ArchTag>,
"Could not select a tile scheduler for given parameters.");
};
template <
class ArchTag,
class TileShape,
class ClusterShape
>
struct TileSchedulerSelector<
PersistentScheduler,
ArchTag,
TileShape,
ClusterShape
> {
using Scheduler = PersistentTileSchedulerSm90;
};
// A void scheduler tag selects the default scheduler (PersistentScheduler) for the given arch tag
template <
class ArchTag,
class TileShape,
class ClusterShape
>
struct TileSchedulerSelector<
void,
ArchTag,
TileShape,
ClusterShape
> {
using Scheduler = typename TileSchedulerSelector<
PersistentScheduler,
ArchTag,
TileShape,
ClusterShape
>::Scheduler;
};
template <
class TileShape,
class ClusterShape
>
struct TileSchedulerSelector<
StreamKScheduler,
arch::Sm90,
TileShape,
ClusterShape
> {
using Scheduler = PersistentTileSchedulerSm90StreamK<TileShape, ClusterShape>;
};
template <
class TileShape,
class ClusterShape
, class GroupProblemShape
>
struct TileSchedulerSelector<
GroupScheduler,
arch::Sm90,
TileShape,
ClusterShape
, GroupProblemShape
> {
using Scheduler = PersistentTileSchedulerSm90Group<GroupProblemShape>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass::gemm::kernel::detail
////////////////////////////////////////////////////////////////////////////////
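// Usage sketch (illustrative only): a GEMM kernel typically resolves its scheduler type through
// this selector rather than naming a scheduler class directly. The alias below is a minimal,
// hypothetical example -- the tile/cluster shapes are assumptions for illustration, not part of
// this header.
//
//   using TileShape_    = cute::Shape<cute::_128, cute::_128, cute::_64>;
//   using ClusterShape_ = cute::Shape<cute::_2, cute::_1, cute::_1>;
//
//   // Persistent (default) scheduler for SM90; passing `void` as the tag selects the same type.
//   using Scheduler = typename cutlass::gemm::kernel::detail::TileSchedulerSelector<
//       cutlass::gemm::PersistentScheduler, cutlass::arch::Sm90,
//       TileShape_, ClusterShape_>::Scheduler;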
| include/cutlass/gemm/kernel/tile_scheduler.hpp/0 | {
"file_path": "include/cutlass/gemm/kernel/tile_scheduler.hpp",
"repo_id": "include",
"token_count": 1205
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data
layout of the global memory fragments, data types, and internal tile sizes.
Partial specializations for threadblock::Mma operations targeting simt instructions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/threadblock/mma_pipelined.h"
#include "cutlass/gemm/threadblock/mma_singlestage.h"
#include "cutlass/arch/cache_operation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Size of a threadblock-scoped access
int kAccessSizeInBits = -1, // -1 denoting the default
/// Number of stages
int Stages = 2,
/// Operation performed by MMA
typename Operator = typename platform::conditional<
(platform::is_same<OperatorClass,
cutlass::arch::OpClassTensorOp>::value) &&
(platform::is_same<ElementA, int8_t>::value ||
platform::is_same<ElementA, int4b_t>::value ||
platform::is_same<ElementA, uint8_t>::value ||
platform::is_same<ElementA, uint4b_t>::value),
cutlass::arch::OpMultiplyAddSaturate,
cutlass::arch::OpMultiplyAdd>::type,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA =
cutlass::arch::CacheOperation::Global,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB =
cutlass::arch::CacheOperation::Global,
/// per-element transformation for elements of A
ComplexTransform TransformA = ComplexTransform::kNone,
/// per-element transformation for elements of B
ComplexTransform TransformB = ComplexTransform::kNone,
bool IsComplex = false // (is_complex<ElementA>::value || is_complex<ElementB>::value)
>
struct DefaultMmaCoreWithAccessSize;
template <
/// Shape of threadblock-scoped matrix multiply operator
typename Shape,
/// Shape of warp-level matrix multiply operator
typename WarpShape,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape,
/// Element data type of A operand
typename ElementA,
/// Layout of operand A
typename LayoutA,
/// Element data type of B operand
typename ElementB,
/// Layout of operand B
typename LayoutB,
/// Data type of accumulator
typename ElementC,
/// Layout of accumulator
typename LayoutC,
/// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp)
typename OperatorClass,
/// Number of stages
int Stages,
/// Operation performed by MMA
typename Operator,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor,
/// Cache operation of operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Cache operation of operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// per-element transformation for elements of A
ComplexTransform TransformA,
/// per-element transformation for elements of B
ComplexTransform TransformB,
bool IsComplex
>
struct DefaultMmaCoreWithAccessSize<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, -1, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> : DefaultMmaCore<
Shape, WarpShape, InstructionShape,
ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
OperatorClass, Stages, Operator, AccumulatorsInRowMajor,
CacheOpA, CacheOpB, TransformA, TransformB, IsComplex
> {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization:
///
/// A: column-major
/// B: row-major
/// Operator: simt class
///
/// This uses the default warp-level operator given tile sizes
template <
/// Shape of threadblock-scoped matrix multiply operator (concept:
/// GemmShape)
typename Shape_,
/// Shape of warp-level matrix multiply operator (concept: GemmShape)
typename WarpShape_,
/// Data type of A operand
typename ElementA_,
/// Data type of B operand
typename ElementB_,
/// Data type of accumulator
typename ElementC_,
/// Layout of accumulator
typename LayoutC_,
/// Size of a threadblock-scoped access (a value of -1 indicates the default)
int kAccessSizeInBits_,
/// Operation performed by GEMM
typename Operator_>
struct DefaultMmaCoreWithAccessSize<Shape_, WarpShape_, typename platform::enable_if<kAccessSizeInBits_ != -1, GemmShape<1, 1, 1>>::type, ElementA_,
layout::ColumnMajor, ElementB_, layout::RowMajor,
ElementC_, LayoutC_, arch::OpClassSimt, kAccessSizeInBits_, 2, Operator_
> {
using Shape = Shape_;
using WarpShape = WarpShape_;
using InstructionShape = GemmShape<1, 1, 1>;
using ElementA = ElementA_;
using LayoutA = layout::ColumnMajor;
using ElementB = ElementB_;
using LayoutB = layout::RowMajor;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using OperatorClass = arch::OpClassSimt;
static int const PartitionsK = Shape::kK / WarpShape::kK;
/// Default Operator
using Operator = Operator_;
/// Number of warps present
using WarpCount = GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
PartitionsK
>;
// Divisibility requirements
static_assert(
!(Shape::kM % WarpShape::kM) &&
!(Shape::kN % WarpShape::kN),
"Threadblock-scoped GEMM should be divisible by warp-scoped GEMM size."
);
/// Number of threads per warp
static int const kWarpSize = warp::WarpSize<arch::OpClassSimt>::value;
/// Number of threads total
static int const kThreads = WarpCount::kCount * kWarpSize;
static int const kElementsPerAccessDefault = 1;
static_assert(kAccessSizeInBits_ == -1 ||
sizeof_bits<ElementA>::value == sizeof_bits<ElementB>::value ||
kAccessSizeInBits_ / sizeof_bits<ElementA>::value == kElementsPerAccessDefault,
"Non-default value for kAccessSizeInBits_ is only allowed if size(elementA) == sizeof(elementB)");
static int const kElementsPerAccess = (kAccessSizeInBits_ != -1) ? kAccessSizeInBits_ / sizeof_bits<ElementA>::value : kElementsPerAccessDefault;
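// Worked example (illustrative): with ElementA = ElementB = float (32 bits), requesting
// kAccessSizeInBits_ = 128 yields kElementsPerAccess = 128 / 32 = 4, i.e. each thread issues
// 128-bit (float4-sized) accesses; leaving kAccessSizeInBits_ at -1 keeps the default of
// one element per access.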
//
// Shared memory layouts
//
using SmemLayoutA = layout::ColumnMajor;
using SmemLayoutB = layout::RowMajor;
//
// Iterators to write to shared memory
//
/// ThreadMap of iterator A
using IteratorThreadMapA = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kM, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to A operand
using SmemIteratorA = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
ElementA,
SmemLayoutA,
1,
IteratorThreadMapA
>;
/// Policy of iterator B
using IteratorThreadMapB = transform::PitchLinearStripminedThreadMap<
layout::PitchLinearShape<Shape::kN, Shape::kK>,
kThreads,
kElementsPerAccess
>;
/// Shared memory iterator to B operand
using SmemIteratorB = transform::threadblock::RegularTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
ElementB,
SmemLayoutB,
0,
IteratorThreadMapB
>;
//
// Warp-level matrix multiply operator
//
// Define the warp-level op
static const int WarpNumThreadsM = detail::simt_get_warp_threads_m<WarpShape>();
static const int WarpNumThreadsN = kWarpSize / WarpNumThreadsM;
static const int ThreadTileM = WarpShape::kM / WarpNumThreadsM;
static const int ThreadTileN = WarpShape::kN / WarpNumThreadsN;
static_assert(!(WarpShape::kM % WarpNumThreadsM) && !(WarpShape::kN % WarpNumThreadsN),
"WarpShape must be divisible by ThreadTile shape.");
static const int LaneLayout = ThreadTileM > 4 && ThreadTileN > 4 ? 2 : 1;
static const int numElementsA = 128 / sizeof_bits<ElementA>::value;
static const int numElementsB = 128 / sizeof_bits<ElementB>::value;
static const int LaneM = cutlass::const_min(numElementsA, ThreadTileM);
static const int LaneN = cutlass::const_min(numElementsB, ThreadTileN);
// LaneM and LaneN are additionally capped by the thread tile extents via const_min above
using LaneMmaShape = cutlass::gemm::GemmShape<
LaneM,
LaneN,
1>;
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<WarpNumThreadsM, WarpNumThreadsN>, // WarpShape
cutlass::layout::RowMajorInterleaved<LaneLayout>, // LaneLayout
LaneMmaShape
>;
using MmaWarpSimt = cutlass::gemm::warp::MmaSimt<
WarpShape, /// Size of the Gemm problem - concept: gemm::GemmShape<>
ElementA, /// Data type of A elements
SmemLayoutA, /// Layout of A matrix (concept: MatrixLayout)
ElementB, /// Data type of B elements
SmemLayoutB, /// Layout of B matrix (concept: MatrixLayout)
ElementC, /// Element type of C matrix
LayoutC, /// Layout of C matrix (concept: MatrixLayout)
Policy /// Policy describing warp-level MmaSimtOp (concept: MmaSimtOp policy)
>;
/// Policy used to define MmaPipelined
using MmaPolicy = MmaPolicy<
MmaWarpSimt,
MatrixShape<0, 0>,
MatrixShape<0, 0>,
WarpCount::kK
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/default_mma_core_with_access_size.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_mma_core_with_access_size.h",
"repo_id": "include",
"token_count": 4136
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a threadblock-scoped GEMV kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix-vector product using SIMT math instructions.
template <
class Core_ //< GemvCore
>
class Gemv {
public:
using Shape = typename Core_::Shape;
/// The MMA operator that computes GEMV
using Operator = typename Core_::Operator;
/// Iterates over A in global memory
using IteratorA = typename Core_::IteratorA;
/// Iterates over B in global memory
using IteratorB = typename Core_::IteratorB;
/// Fragment of operand C loaded from global memory
using IteratorC = typename Core_::IteratorC;
/// Fragment of operand A loaded from global memory
using FragmentA = typename IteratorA::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB = typename IteratorB::Fragment;
/// Fragment of operand accumulator loaded/stored to global memory
using FragmentC = typename Operator::FragmentC;
/// Shape of the per-thread GEMV operation
using ThreadShape = typename Core_::ThreadShape;
public:
CUTLASS_DEVICE
Gemv() { }
CUTLASS_DEVICE
void operator()(
GemmCoord const &problem_size, ///< problem size of batched GEMV
FragmentC &accum, ///< destination accumulator tile
IteratorA iterator_A, ///< iterator over A operand in global memory
IteratorB iterator_B, ///< iterator over B operand in global memory
FragmentC const &src_accum) { ///< source accumulator tile
//
// Prologue
//
FragmentA frag_A;
FragmentB frag_B;
frag_A.clear();
frag_B.clear();
iterator_A.load(frag_A);
iterator_B.load(frag_B);
++iterator_A;
++iterator_B;
//
// Mainloop
//
Operator thread_mma;
int gemm_k = problem_size.k();
if (gemm_k < Shape::kK)
{
iterator_A.clear_mask();
iterator_B.clear_mask();
}
// iterate over K to accumulate result
CUTLASS_GEMM_LOOP
for (; gemm_k > 0; gemm_k -= Shape::kK) {
thread_mma(accum, frag_A, frag_B, accum);
iterator_A.load(frag_A);
iterator_B.load(frag_B);
++iterator_A;
++iterator_B;
if (gemm_k < Shape::kK)
{
iterator_A.clear_mask();
iterator_B.clear_mask();
}
}
}
};
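// Usage sketch (illustrative only): inside a batched GEMV kernel, the threadblock-scoped operator
// above is driven roughly as follows. `GemvCore` stands in for a concrete
// cutlass::gemm::threadblock::GemvCore<...> instantiation and the iterator constructor arguments
// are schematic -- both are assumptions for illustration, not part of this header.
//
//   using Gemv = cutlass::gemm::threadblock::Gemv<GemvCore>;
//
//   typename Gemv::IteratorA iterator_A(params_A, ptr_A, problem_size.mk(), thread_idx, offset_A);
//   typename Gemv::IteratorB iterator_B(params_B, ptr_B, {problem_size.k(), 1}, thread_idx, offset_B);
//
//   typename Gemv::FragmentC accum;
//   accum.clear();
//
//   Gemv gemv_op;
//   gemv_op(problem_size, accum, iterator_A, iterator_B, accum);  // accum += A * x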
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/gemv.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/gemv.h",
"repo_id": "include",
"token_count": 1482
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implements streamk threadblock mapping blockIdx to GEMM problems.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"
#include "cutlass/gemm/gemm_enumerated_types.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/gemm/threadblock/index_remat.h"
#if !defined(__CUDACC_RTC__)
#include <iostream>
#include "cutlass/core_io.h"
#include "cutlass/trace.h"
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Threadblock mapping control for GEMMs
struct ThreadblockSwizzleStreamK {
/// Advertise StreamkFeature
using StreamkFeature = void;
/// Kernel traits
template <typename GemmKernel>
struct KernelTraits {};
/// Reduction strategy
enum ReductionStrategy
{
kNone, // Data-parallel strategy (no seams, fixup, etc.)
kAtomic, // Non-deterministic reduction of SK-block partials using atomic aggregation in L2
kMixed, // Deterministic reduction of SK-block partials employing either:
// (a) A separate wave of reduction thread blocks (for scenarios with lots of
// SK-blocks per SK-tile)
// (b) Turnstile-ordered atomic aggregation in L2 (for scenarios with few
// SK-blocks per SK-tile)
};
static ReductionStrategy const kReductionStrategy = kMixed;
//
// Heuristics
//
/// Data-parallel wave-quantization efficiency threshold (above which we go data-parallel)
static float constexpr kDpEfficiencyThreshold = 0.92f;
/// Minimum number of MAC-iterations per streamk block
static int const kMinItersPerSkBlock = 2;
/// Height in CTAs of a grid rasterization cohort
static int const kCohortCtasM = 8;
/// Width in CTAs of a grid rasterization cohort
static int const kCohortCtasN = 4;
/// Number of CTAs per cohort
static int const kCtasPerCohort = kCohortCtasN * kCohortCtasM;
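// i.e. 8 (M) x 4 (N) = 32 CTAs rasterized together as one cohort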
/// Cost-equivalent number of SM-iterations for fixup I/O
static int const kFixupStartupIterEquiv = 10;
static int const kFixupPeerIterEquiv = 3;
//
// Member state
//
/// The 3D value-extents of the GEMM computation volume (m,n,k)
GemmCoord problem_size;
/// Div/mod accelerators
FastDivmod div_mod_tiled_shape_m;
FastDivmod div_mod_tiled_shape_n;
FastDivmod div_mod_tiled_cohort_shape_n;
FastDivmod div_mod_iters_per_tile;
/// Whether to perform cohort CTA rasterization
bool cohort_raster;
// Whether to pad and remap block indices
bool remap_block_indices;
/// CTA occupancy per SM
int sm_occupancy;
/// Number of SMs for dispatch heuristics to load-balance using Stream-K CTAs (wave size)
int avail_sms;
/// Number of data-parallel thread blocks in the grid
int dp_blocks;
/// Number of output tiles each CTA in the first DP wave will produce
int dp_first_wave_tiles;
/// Number of reduction blocks in the grid
int reduction_blocks;
/// Number of waves of SK thread blocks
int sk_waves;
/// Number of output tiles covered by SK thread blocks
int sk_tiles;
/// Number of SK blocks per region that perform one extra ("big") iteration
int sk_big_blocks_per_region;
/// Number of MAC iterations per SK region
int sk_iters_per_region;
/// Div/mod accelerators
FastDivmod div_mod_sk_iters_per_normal_block;
FastDivmod div_mod_sk_iters_per_big_block;
FastDivmod div_mod_sk_iters_per_region;
FastDivmod div_mod_sk_regions; //!! used in block map
FastDivmod div_mod_sk_blocks_per_region; //!! used in block map
/// The batch count
int batch_count;
//
// Host+device interface
//
/// Constructor
ThreadblockSwizzleStreamK() = default;
/// Returns the GEMM volume in thread block tiles
CUTLASS_HOST_DEVICE
GemmCoord tiled_shape() const
{
return GemmCoord(
static_cast<int>(div_mod_tiled_shape_m),
static_cast<int>(div_mod_tiled_shape_n),
batch_count);
}
/// Number of iterations per output tile
CUTLASS_HOST_DEVICE
int iters_per_tile() const
{
return static_cast<int>(div_mod_iters_per_tile);
}
/// Number of iterations for normal SK-blocks
CUTLASS_HOST_DEVICE
int sk_iters_per_normal_block() const
{
return static_cast<int>(div_mod_sk_iters_per_normal_block);
}
/// Number of SK regions
CUTLASS_HOST_DEVICE
int sk_regions() const
{
return static_cast<int>(div_mod_sk_regions);
}
/// Number of SK blocks per region (splitting factor)
CUTLASS_HOST_DEVICE
int sk_blocks_per_region() const
{
return static_cast<int>(div_mod_sk_blocks_per_region);
}
//
// Host-side interface
//
/// Debug print
void Print()
{
#ifndef __CUDA_ARCH__
auto tiles = tiled_shape().mn().product();
std::cout <<
"problem_size: (" << problem_size.m() << "," << problem_size.n() << ")" <<
", tiled_shape: (" << tiled_shape().m() << "," << tiled_shape().n() << ")" <<
", tiles: " << tiles <<
", dp_tiles: " << tiles - sk_tiles <<
", sk_tiles: " << sk_tiles <<
", iters_per_tile: " << iters_per_tile() <<
", reduction_blocks: " << reduction_blocks <<
", dp_blocks: " << dp_blocks <<
", dp_waves: " << dp_blocks / avail_sms <<
", dp_first_wave_tiles: " << dp_first_wave_tiles <<
", sk_blocks_per_region: " << sk_blocks_per_region() <<
", sk_regions: " << sk_regions() <<
", sk_waves: " << sk_waves <<
", sk_iters_per_normal_block: " << sk_iters_per_normal_block() <<
", sk_big_blocks_per_region: " << sk_big_blocks_per_region <<
", remap_block_indices: " << remap_block_indices <<
", cohort_raster: " << cohort_raster <<
", sm_occupancy: " << sm_occupancy <<
", avail_sms: " << avail_sms <<
", num_blocks: " << get_num_blocks() <<
"\n\n";
#endif
}
// Compute sk_blocks to dispatch for a given number of sk_tiles
static void get_sk_blocks(
int &sk_blocks, /// [out]
int &savings_iters, /// [out]
int sk_tiles,
int iters_per_tile,
int avail_sms,
int max_sk_occupancy,
bool allow_partial_wave)
{
savings_iters = INT_MIN;
sk_blocks = 0;
if (sk_tiles == 0) {
return;
}
int sk_iters = sk_tiles * iters_per_tile;
int dp_equiv_waves = (sk_tiles + avail_sms - 1) / avail_sms;
int dp_equiv_iters = iters_per_tile * dp_equiv_waves;
int min_sk_blocks = (allow_partial_wave) ? fast_min(avail_sms, sk_tiles + 1) : avail_sms;
int max_sk_blocks = fast_min(avail_sms * max_sk_occupancy, sk_iters / kMinItersPerSkBlock);
for (int trial_sk_blocks = min_sk_blocks; trial_sk_blocks <= max_sk_blocks; ++trial_sk_blocks)
{
int sk_waves = (trial_sk_blocks + avail_sms - 1) / avail_sms;
int max_sk_iters_per_block = (sk_iters + trial_sk_blocks - 1) / trial_sk_blocks;
int sk_iter_equiv = max_sk_iters_per_block * sk_waves;
int num_peers = ((trial_sk_blocks + sk_tiles - 1) / sk_tiles) + 1; // add one for alignment skew
float iter_cost = 0.02f * float(num_peers) * float(sk_iter_equiv);
if (trial_sk_blocks % sk_tiles == 0)
{
// aligned
num_peers = (trial_sk_blocks / sk_tiles);
iter_cost = 0.0f;
}
float peer_cost = 2.0f * float(num_peers);
float base_cost = 2.0f * float(sk_waves);
int fixup_iter_equiv = int(base_cost + iter_cost + peer_cost);
int trial_savings_iters = dp_equiv_iters - sk_iter_equiv - fixup_iter_equiv;
if (trial_savings_iters >= savings_iters) {
savings_iters = trial_savings_iters;
sk_blocks = trial_sk_blocks;
}
}
}
/// Determine the populations of DP and SK blocks to invoke for the given number of output tiles
static void get_blocks(
int &dp_tiles, /// [out]
int &sk_blocks, /// [out]
int output_tiles,
int iters_per_tile,
int avail_sms,
int sm_occupancy)
{
int full_waves = output_tiles / avail_sms;
int full_wave_tiles = full_waves * avail_sms;
int partial_wave_tiles = output_tiles - full_wave_tiles;
int score = -1;
dp_tiles = output_tiles;
sk_blocks = 0;
if (partial_wave_tiles == 0)
{
// Perfect quantization
return;
}
if (full_waves < sm_occupancy)
{
// We're less than full GPU occupancy
// Form the SK wave from the partial wave to get us up to full GPU occupancy
int max_sk_occupancy = sm_occupancy - full_waves;
dp_tiles = full_wave_tiles;
get_sk_blocks(
sk_blocks,
score,
partial_wave_tiles,
iters_per_tile,
avail_sms,
max_sk_occupancy,
true); // we can run with less than a full wave of SK-blocks
if (score < 0) {
// not profitable
sk_blocks = 0;
dp_tiles = output_tiles;
}
return;
}
// We're at (or greater) than GPU occupancy
if ((sm_occupancy > 1 ) && (full_waves % sm_occupancy == sm_occupancy - 1))
{
// If occupancy is more than one CTA per SM, form the SK wave from the partial
// wave to get us to full GPU occupancy
int max_sk_occupancy = 1;
dp_tiles = full_wave_tiles;
get_sk_blocks(
sk_blocks,
score,
partial_wave_tiles,
iters_per_tile,
avail_sms,
max_sk_occupancy,
true); // we can run with less than a full wave of SK-blocks
if (score >= 0) {
return;
}
}
// Form the SK wave by combining the last full wave and the partial wave
dp_tiles = full_wave_tiles - avail_sms;
int max_sk_occupancy = sm_occupancy - ((full_waves - 1) % sm_occupancy);
get_sk_blocks(
sk_blocks,
score,
partial_wave_tiles + avail_sms,
iters_per_tile,
avail_sms,
max_sk_occupancy,
false); // we cannot run with less than a full wave of SK-blocks
if (score < 0) {
// not profitable
sk_blocks = 0;
dp_tiles = output_tiles;
}
}
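// Worked example (illustrative): with output_tiles = 10, avail_sms = 4 and sm_occupancy = 4,
// we get full_waves = 2 and partial_wave_tiles = 2. Since full_waves < sm_occupancy, the first
// branch above keeps dp_tiles = 8 and offers the remaining 2 tiles to get_sk_blocks() with
// max_sk_occupancy = 2 (a partial SK wave is allowed); if that trade is judged unprofitable
// (score < 0), the dispatch falls back to 10 pure data-parallel tiles.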
/// Constructor: *Gemm* problem size (m, n, k)
ThreadblockSwizzleStreamK(
GemmUniversalMode const mode_,
GemmCoord const problem_size_,
GemmCoord const tile_size_,
int const batch_split_, /// Either (mode == GemmUniversalMode::kBatched) the batch count, or (mode == GemmUniversalMode::kGemm) the tile-splitting factor (1 defaults to StreamK, >1 emulates Split-K)
int const sm_occupancy_,
int const device_sms_,
int const avail_sms_, /// The number of SMs that StreamK dispatch heuristics will attempt to load-balance across (-1 defaults to device width, 1 implies classic data-parallel scheduling)
size_t const element_A_bytes_,
size_t const element_B_bytes_,
size_t const element_C_bytes_,
int const epilogue_acc_fragments_)
:
problem_size(problem_size_),
batch_count((mode_ == GemmUniversalMode::kBatched || mode_ == GemmUniversalMode::kArray) ? batch_split_ : 1),
reduction_blocks(0),
dp_blocks(0),
dp_first_wave_tiles(1), // Default: one tile per DP-block in the first wave of DP blocks
sk_tiles(0),
sk_big_blocks_per_region(0),
sk_iters_per_region(0),
sk_waves(0),
sm_occupancy(sm_occupancy_),
remap_block_indices(false),
avail_sms(fast_max(1, avail_sms_)),
cohort_raster(false)
{
int gpu_occupancy = device_sms_ * sm_occupancy;
int iters_per_tile = (problem_size.k() + tile_size_.k() - 1) / tile_size_.k();
int sk_iters_per_normal_block = 0;
int sk_regions = 1; // Default: a single region of iteration space (across all SK tiles)
int sk_blocks_per_region = 0;
GemmCoord tiled_shape(
(problem_size.m() + tile_size_.m() - 1) / tile_size_.m(),
(problem_size.n() + tile_size_.n() - 1) / tile_size_.n(),
batch_count);
size_t problem_bytes =
(element_C_bytes_ * problem_size.m() * problem_size.n()) +
(element_A_bytes_ * problem_size.m() * problem_size.k()) +
(element_B_bytes_ * problem_size.k() * problem_size.n());
size_t problem_flops = size_t(problem_size.m()) * size_t(problem_size.n()) * size_t(problem_size.k()) * 2;
[[maybe_unused]] float flops_per_byte = float(problem_flops) / float(problem_bytes);
int output_tiles = tiled_shape.m() * tiled_shape.n();
int waves = (output_tiles + avail_sms - 1) / avail_sms;
[[maybe_unused]] float dp_efficiency = float(output_tiles) / float(waves * avail_sms);
//
// Determine dispatch composition of DP-tiles and SK-blocks
//
// Start with a DP-only configuration
int dp_tiles = output_tiles; // Number of data-parallel tiles
int sk_blocks = 0; // Number of thread blocks to produce the remaining SK tiles
// Only kGemm mode allows for SK load balancing
if (mode_ == GemmUniversalMode::kGemm)
{
int split_factor = batch_split_;
if (split_factor > 1)
{
// Split-K override
dp_tiles = 0;
sk_blocks = output_tiles * split_factor;
}
else if ((kReductionStrategy != kNone) && // Load-balancing strategy statically enabled
(avail_sms > 1)) // Plurality of SMs to load balance across
{
// Use heuristics
get_blocks(
dp_tiles, /// [out]
sk_blocks, /// [out]
output_tiles,
iters_per_tile,
avail_sms,
sm_occupancy);
}
}
sk_tiles = output_tiles - dp_tiles;
// Compute SK block iteration details
if (sk_blocks > 0)
{
sk_waves = (sk_blocks + avail_sms - 1) / avail_sms;
int sk_iters = sk_tiles * iters_per_tile;
sk_blocks = fast_min(sk_blocks, sk_iters);
sk_iters_per_normal_block = sk_iters / sk_blocks;
int extra_sk_iters = sk_iters - (sk_iters_per_normal_block * sk_blocks);
int sk_big_blocks = extra_sk_iters;
if ((sk_blocks > sk_tiles) && (sk_blocks % sk_tiles == 0))
{
// Split-K decomposition
sk_regions = sk_tiles;
}
sk_blocks_per_region = sk_blocks / sk_regions;
sk_big_blocks_per_region = sk_big_blocks / sk_regions;
sk_iters_per_region = sk_iters / sk_regions;
// Use a separate reduction wave when all of:
// - Non-atomic reduction strategy
// - The number of SK waves won't fully occupy the GPU (Otherwise we don't have
// a strong-scaling case for more parallel reduction)
// - More than three peers working on an SK tile. (This occurs when the ratio of
// SK-blocks to SK-tiles > 2, as a single tile may be covered by four SK-blocks,
// e.g.:[partial-block | block | block | partial-block] ). With three or
// fewer peers, the two non-finishing SK-blocks are not expected to contend.
if ((kReductionStrategy == kMixed) &&
(sk_waves < sm_occupancy) &&
(sk_blocks > 2 * sk_tiles))
{
// Launch a reduction block for every accumulator fragment in each SK-tile
reduction_blocks = sk_tiles * epilogue_acc_fragments_;
}
// When we have a multi-occupancy kernel and at least two waves of active blocks (where
// at least one wave is SK blocks), we need to (1) dispatch at least four waves, and (2)
// remap the block indices so that we can reliably spread the SK blocks evenly across the
// device's first SM occupancy valence. Also see get_num_blocks() and get_block_idx().
remap_block_indices = (
(sm_occupancy > 1) &&
(device_sms_ == avail_sms) &&
(get_num_active_blocks() > avail_sms * 2));
// Initialize fast div/mod members related to SK
div_mod_sk_iters_per_normal_block = FastDivmod(sk_iters_per_normal_block);
div_mod_sk_iters_per_big_block = FastDivmod(sk_iters_per_normal_block + 1);
div_mod_sk_iters_per_region = FastDivmod(sk_iters_per_region);
div_mod_sk_regions = FastDivmod(sk_regions);
div_mod_sk_blocks_per_region = FastDivmod(sk_blocks_per_region);
}
//
// Compute DP blocks
//
dp_blocks = dp_tiles;
cutlass::gemm::GemmCoord tiled_cohort_shape(
(tiled_shape.m() + kCohortCtasM - 1) / kCohortCtasM,
(tiled_shape.n() + kCohortCtasN - 1) / kCohortCtasN,
tiled_shape.k());
int cohort_blocks = (tiled_cohort_shape.m() * tiled_cohort_shape.n()) * kCtasPerCohort;
float cohort_efficiency = float(dp_blocks) / float(cohort_blocks);
// Check if the SK tiles would be in cohorts that are in-bounds
bool sk_in_range = true;
if (sk_tiles > 0)
{
int last_sk_tile = sk_tiles - 1;
int cohort_tile_idx = last_sk_tile / kCtasPerCohort;
int cohort_grid_m = cohort_tile_idx / tiled_cohort_shape.n();
int cohort_grid_n = (cohort_grid_m > 0) ?
tiled_cohort_shape.n() - 1 :
cohort_tile_idx % tiled_cohort_shape.n();
if ((((cohort_grid_m + 1) * kCohortCtasM) >= tiled_shape.m()) ||
(((cohort_grid_n + 1) * kCohortCtasN) >= tiled_shape.n()))
{
sk_in_range = false;
}
}
// Decide if we're going to be doing cohort raster
if (sk_in_range &&
(dp_blocks >= gpu_occupancy * 2) &&
(cohort_efficiency > 0.85f))
{
cohort_raster = true;
dp_blocks = cohort_blocks;
}
else if (sk_waves > 0)
{
// Update semi-persistence of first DP wave to ensure full grid wavesets
// (Only applies when there's an SK component and we're not doing blocked cohort rasterization)
int dp_tile_waves = (dp_tiles + avail_sms - 1) / avail_sms;
int full_dp_tile_waves = dp_tiles / avail_sms;
int waveset_excess = (sk_waves + dp_tile_waves) % sm_occupancy;
if (dp_first_wave_tiles + waveset_excess <= full_dp_tile_waves)
{
dp_first_wave_tiles += waveset_excess;
dp_blocks -= (waveset_excess * avail_sms);
}
}
// Setup fast-div/mod for device-side usage
div_mod_tiled_shape_m = FastDivmod(tiled_shape.m());
div_mod_tiled_shape_n = FastDivmod(tiled_shape.n());
div_mod_tiled_cohort_shape_n = FastDivmod(tiled_cohort_shape.n());
div_mod_iters_per_tile = FastDivmod(iters_per_tile);
}
/// Number of blocks performing useful work
int get_num_active_blocks() const
{
return (sk_waves * avail_sms) + dp_blocks + reduction_blocks;
}
/// Obtains number of threadblocks per GEMM
int get_num_blocks() const
{
int active_blocks = get_num_active_blocks();
if (remap_block_indices)
{
// Add padding blocks if we are performing remapping in order to dispatch a grid of at least four waves
return fast_max(active_blocks, avail_sms * 4);
}
return active_blocks;
}
/// Obtains grid extents in CTAs
dim3 get_grid_dims() const
{
return dim3(get_num_blocks(), 1, batch_count);
}
//
// Device-side interface
//
/// Obtains number of threadblocks per GEMM
CUTLASS_DEVICE
int device_num_blocks() const
{
return gridDim.x;
}
/// Obtains tile index for the given sk iteration
CUTLASS_DEVICE
int get_sk_tile_idx(int iter) const
{
int tile_idx = div_mod_iters_per_tile.div(iter);
return tile_idx;
}
/// Obtains the batch index
CUTLASS_DEVICE
int get_batch_idx() const
{
return RematerializeBlockIdxZ();
}
/// Obtains the calling threadblock's tiled coordinates for the given tile index
CUTLASS_DEVICE
GemmCoord get_tile_offset(int tile_idx) const
{
int m, n;
// row-major raster
div_mod_tiled_shape_n(m, n, tile_idx);
if (tiled_shape().m() < tiled_shape().n())
{
// column-major raster
div_mod_tiled_shape_m(n, m, tile_idx);
}
if (cohort_raster)
{
// tiled cohort raster
int cohort_tile_idx = tile_idx / kCtasPerCohort;
int cohort_grid_m, cohort_grid_n;
div_mod_tiled_cohort_shape_n(cohort_grid_m, cohort_grid_n, cohort_tile_idx);
int block_idx_cohort = tile_idx % kCtasPerCohort;
int block_cohort_m = block_idx_cohort / kCohortCtasN;
int block_cohort_n = block_idx_cohort % kCohortCtasN;
m = (cohort_grid_m * kCohortCtasM) + block_cohort_m;
n = (cohort_grid_n * kCohortCtasN) + block_cohort_n;
}
return GemmCoord(m, n, get_batch_idx());
}
/// Obtains the calling threadblock's tiled coordinates for the given tile index (row-major rasterization)
CUTLASS_DEVICE
GemmCoord get_tile_offset_row_major(int tile_idx) const
{
// row-major raster
int m, n;
div_mod_tiled_shape_n(m, n, tile_idx);
return GemmCoord(m, n, get_batch_idx());
}
/// Obtains calling threadblock's linear threadblock index
CUTLASS_DEVICE
int get_block_idx() const
{
int block_idx = RematerializeBlockIdxX();
// Remap the block indices for the first two waves of thread blocks if
// we have multi-occupancy and the grid constitutes four or more waves
if (remap_block_indices && (block_idx < avail_sms * 2))
{
int dest_sm = block_idx / 2;
int dest_wave = block_idx % 2;
int remapped_block_idx = dest_sm + (dest_wave * avail_sms);
block_idx = remapped_block_idx;
}
// Remap block indices to interleave SK regions to limit intra-region waiting
if (block_idx < sk_regions() * sk_blocks_per_region())
{
int block_in_region;
int region;
div_mod_sk_regions(block_in_region, region, block_idx);
block_idx = (region * sk_blocks_per_region()) + block_in_region;
}
return block_idx;
}
/// Obtains the linear threadblock index of the SK block that owns the given iteration
CUTLASS_DEVICE
int get_sk_block_idx(int iter) const
{
int region_idx;
int iter_in_region;
div_mod_sk_iters_per_region(region_idx, iter_in_region, iter);
int big_block_iters = (sk_big_blocks_per_region * sk_iters_per_normal_block()) + sk_big_blocks_per_region; // number of iterations in the region's big blocks
int normal_block_iters = iter_in_region - big_block_iters; // number of iterations in the region's normal blocks
int big_block_idx_in_region = div_mod_sk_iters_per_big_block.div(iter_in_region);
int normal_block_idx_in_region = sk_big_blocks_per_region + div_mod_sk_iters_per_normal_block.div(normal_block_iters);
int block_idx_in_region = (big_block_idx_in_region < sk_big_blocks_per_region) ?
big_block_idx_in_region :
normal_block_idx_in_region;
int owning_block_idx = (sk_blocks_per_region() * region_idx) + block_idx_in_region;
return owning_block_idx;
}
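// Worked example (illustrative): in a region with sk_iters_per_normal_block() = 2 and
// sk_big_blocks_per_region = 2 (blocks 0..1 run 3 iterations, blocks 2..3 run 2), iteration 7
// falls past the 6 big-block iterations, so normal_block_iters = 1 and the owning block is
// sk_big_blocks_per_region + 1/2 = 2.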
/// Obtains iteration extents for the given SK block index
CUTLASS_DEVICE
void get_iter_extents(
int sk_block_idx,
int &block_iter_begin,
int &block_iter_end) const
{
int region_idx;
int block_idx_in_region;
div_mod_sk_blocks_per_region(region_idx, block_idx_in_region, sk_block_idx);
block_iter_begin = (region_idx * sk_iters_per_region) + (block_idx_in_region * sk_iters_per_normal_block());
// Adjust extents for the first "num_big_blocks" blocks that get one extra iteration
int block_iters = sk_iters_per_normal_block();
if (block_idx_in_region < sk_big_blocks_per_region) {
// This is a +1 iteration block
block_iter_begin += block_idx_in_region;
block_iters++;
} else {
// This is a regular block
block_iter_begin += sk_big_blocks_per_region;
}
block_iter_end = block_iter_begin + block_iters;
}
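// Worked example (illustrative): suppose a region has sk_iters_per_region = 10 iterations split
// across sk_blocks_per_region() = 4 blocks, so sk_iters_per_normal_block() = 2 and
// sk_big_blocks_per_region = 2. Blocks 0 and 1 are "big" and cover iterations [0,3) and [3,6);
// blocks 2 and 3 are normal and cover [6,8) and [8,10). The begin/end arithmetic above reproduces
// exactly this partition.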
/// Obtains the linear threadblock index of the first block to work on the given tile
CUTLASS_DEVICE
int get_first_block_idx(int tile_idx, int block_idx) const
{
if (tile_idx >= sk_tiles) {
// DP tile
return block_idx;
}
int iter = tile_idx * iters_per_tile();
return get_sk_block_idx(iter);
}
};
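// Usage sketch (illustrative only): host code typically constructs the swizzle once per problem
// and uses it to size the launch grid. The element sizes and fragment count below are placeholders
// for what a concrete kernel would supply -- they are assumptions for illustration.
//
//   cutlass::gemm::threadblock::ThreadblockSwizzleStreamK swizzle(
//       cutlass::gemm::GemmUniversalMode::kGemm,
//       problem_size,                 // GemmCoord {M, N, K}
//       tile_size,                    // GemmCoord threadblock tile {Mtile, Ntile, Ktile}
//       /*batch_split=*/1,            // 1 => Stream-K heuristics; >1 emulates Split-K
//       sm_occupancy, device_sms, /*avail_sms=*/device_sms,
//       sizeof(ElementA), sizeof(ElementB), sizeof(ElementC),
//       epilogue_acc_fragments);
//
//   dim3 grid = swizzle.get_grid_dims();   // (num_blocks, 1, batch_count)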
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/threadblock/threadblock_swizzle_streamk.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/threadblock_swizzle_streamk.h",
"repo_id": "include",
"token_count": 10734
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/gemm/warp/mma_simt_tile_iterator.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK = 1,
/// Complex transformation on operand A
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transformation on operand B
ComplexTransform TransformB = ComplexTransform::kNone,
/// Used for partial specialization
typename Enable = bool
>
class MmaSimt {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
using Policy = Policy_;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassSimt;
/// Hard-coded for now
using ArchTag = arch::Sm50;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Layout of threads
using ThreadLayoutA = typename platform::conditional< platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutA >::value,
layout::ColumnMajor,
typename platform::conditional < platform::is_same< layout::RowMajorInterleaved<4>, LayoutA >::value,
layout::RowMajor,
LayoutA>::type
>::type;
using ThreadLayoutB = typename platform::conditional< platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutB >::value,
layout::ColumnMajor,
typename platform::conditional < platform::is_same< layout::RowMajorInterleaved<4>, LayoutB >::value,
layout::RowMajor,
LayoutB>::type
>::type;
static constexpr bool use_dp4a = (platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutA>::value ||
platform::is_same< layout::RowMajorInterleaved<4>, LayoutA >::value) &&
platform::is_same< ElementA, int8_t >::value &&
platform::is_same< ElementB, int8_t >::value;
using dp4a_type = typename platform::conditional< use_dp4a , int8_t, bool >::type;
/// Thread-level matrix multiply accumulate operator
using ThreadMma = thread::Mma<
GemmShape<
Shape::kM / Policy::WarpShape::kRow,
Shape::kN / Policy::WarpShape::kColumn,
Policy::LaneMmaShape::kK>,
ElementA,
ThreadLayoutA,
ElementB,
ThreadLayoutB,
ElementC,
LayoutC,
arch::OpMultiplyAdd,
dp4a_type
>;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename ThreadMma::ArchMmaOperator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Shape of the underlying instruction
using InstructionShape = GemmShape<1,1,use_dp4a ? 4 : 1>;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaSimtTileIterator<
MatrixShape<Shape::kM, Policy::LaneMmaShape::kK>,
Operand::kA,
ElementA,
LayoutA,
Policy,
PartitionsK,
Shape::kK
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaSimtTileIterator<
MatrixShape<Policy::LaneMmaShape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
Policy,
PartitionsK,
Shape::kK
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
/// Iterates over the C operand in memory
using IteratorC = MmaSimtTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
Operand::kC,
ElementC,
LayoutC,
Policy
>;
/// Storage for C tile
using FragmentC = typename ThreadMma::FragmentC;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaSimt() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &d,
FragmentA a,
FragmentB b,
FragmentC const &c, int group_idx = 0) const {
ThreadMma mma;
if (kTransformA == ComplexTransform::kConjugate) {
conjugate<FragmentA> conj_a;
a = conj_a(a);
}
if (kTransformB == ComplexTransform::kConjugate) {
conjugate<FragmentB> conj_b;
b = conj_b(b);
}
mma(d, a, b, c);
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
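// Usage sketch (illustrative only): a threadblock mainloop drives this warp-level operator by
// loading A/B fragments from shared memory through the tile iterators and accumulating in
// registers. `WarpMma` stands for a concrete MmaSimt<...> instantiation and the iterator
// construction is schematic -- assumptions for illustration, not part of this header.
//
//   WarpMma warp_mma;
//   typename WarpMma::FragmentA frag_A;
//   typename WarpMma::FragmentB frag_B;
//   typename WarpMma::FragmentC accum;
//   accum.clear();
//
//   iter_A.load(frag_A);  ++iter_A;   // MmaSimtTileIterator over the A tile in shared memory
//   iter_B.load(frag_B);  ++iter_B;   // MmaSimtTileIterator over the B tile in shared memory
//
//   warp_mma(accum, frag_A, frag_B, accum);  // accum += A * B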
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
| include/cutlass/gemm/warp/mma_simt.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_simt.h",
"repo_id": "include",
"token_count": 2843
} | 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting Tensor Cores while additionally
/// reducing one operand along the K dimension.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Reduce operand A or B along K dimension
bool ReduceKForA_,
/// Number of partitions along K dimension
int PartitionsK_ = 1,
  /// Store the accumulators in row major or column major. Row major is used
  /// when the output layout is interleaved.
bool AccumulatorsInRowMajor = false,
/// Used for partial specialization
typename Enable = bool
>
class MmaWithReductionTensorOp {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
  /// Policy describing the warp-level tensor op (concept: MmaTensorOpPolicy)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Architecture tag from underlying instruction
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Complex transform on A operand
static ComplexTransform const kTransformA = ComplexTransform::kNone;
/// Complex transform on B operand
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
static bool const kReduceKForA = ReduceKForA_;
static_assert(platform::is_same<ElementA, cutlass::half_t>::value ||
platform::is_same<ElementA, cutlass::bfloat16_t>::value,
"ElementA needs to be fp16 or bf16.");
static_assert(platform::is_same<ElementB, cutlass::half_t>::value ||
platform::is_same<ElementB, cutlass::bfloat16_t>::value,
"ElementB needs to be fp16 or bf16.");
static_assert(platform::is_same<InstructionShape,
cutlass::gemm::GemmShape<16, 8, 16>>::value,
"Only supports 16x8x16 tensor core instruction.");
static_assert(!AccumulatorsInRowMajor,
"Only calls tensor core instructions in column major.");
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>, Operand::kA, ElementA, LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow, kThreadCount, kPartitionsK>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA =
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements>;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>, Operand::kB, ElementB, LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kRow, kThreadCount, kPartitionsK>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB =
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC,
typename ArchMmaOperator::Shape, typename Policy::OpDelta>;
/// Storage for C tile
using FragmentC = typename IteratorC::Fragment;
/// Number of mma operations performed
using MmaIterations = MatrixShape<
(Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM,
(Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN
>;
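  /// Per-thread partial sums of one operand accumulated along the K dimension by operator():
  /// two values per 16-row tile of A (Shape::kM / 8 in total) when kReduceKForA is true,
  /// otherwise one value per 8-column tile of B (Shape::kN / 8 in total).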
using FragmentReduction = Array<ElementC, kReduceKForA ? (Shape::kM / 8) : (Shape::kN / 8)>;
public:
/// Underlying matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaWithReductionTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C,
FragmentReduction &gemm_k_reduction
) const {
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
D = C;
[[maybe_unused]] MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A);
[[maybe_unused]] MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B);
[[maybe_unused]] MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)
assert(0);
#elif defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
// Serpentine visitation order maximizing reuse of Ra
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n);
mma(ptr_D[m + n_serpentine * MmaIterations::kRow],
ptr_A[m],
ptr_B[n_serpentine],
ptr_D[m + n_serpentine * MmaIterations::kRow]);
if (!kReduceKForA && m == 0) {
#if 0
gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4]);
gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4 + 1]);
gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4 + 2]);
gemm_k_reduction[n_serpentine] += float(B[n_serpentine * 4 + 3]);
#else
uint32_t const *tmp = reinterpret_cast<uint32_t const *>(&B);
if (platform::is_same<ElementB, cutlass::half_t>::value) {
asm volatile(
"{\n\t"
" .reg .f16 low, high;\n\t"
" .reg .f32 tmp;\n\t"
" mov.b32 {low, high}, %1;\n\t"
" cvt.f32.f16 tmp, low;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" cvt.f32.f16 tmp, high;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" mov.b32 {low, high}, %2;\n\t"
" cvt.f32.f16 tmp, low;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" cvt.f32.f16 tmp, high;\n\t"
" add.f32 %0, tmp, %0;\n\t"
"}\n\t"
: "+f"(gemm_k_reduction[n_serpentine])
: "r"(tmp[n_serpentine * 2]), "r"(tmp[n_serpentine * 2 + 1]));
} else if (platform::is_same<ElementB, cutlass::bfloat16_t>::value) {
asm volatile(
"{\n\t"
" .reg .f32 tmp;\n\t"
" shl.b32 tmp, %1, 16;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" and.b32 tmp, %1, 0xffff0000;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" shl.b32 tmp, %2, 16;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" and.b32 tmp, %2, 0xffff0000;\n\t"
" add.f32 %0, tmp, %0;\n\t"
"}\n\t"
: "+f"(gemm_k_reduction[n_serpentine])
: "r"(tmp[n_serpentine * 2]), "r"(tmp[n_serpentine * 2 + 1]));
} else {
assert(0);
}
#endif
}
if (kReduceKForA && (n == 0)) {
#if 0
gemm_k_reduction[m * 2] += float(A[m * 8]);
gemm_k_reduction[m * 2] += float(A[m * 8 + 1]);
gemm_k_reduction[m * 2] += float(A[m * 8 + 4]);
gemm_k_reduction[m * 2] += float(A[m * 8 + 5]);
gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 2]);
gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 3]);
gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 6]);
gemm_k_reduction[m * 2 + 1] += float(A[m * 8 + 7]);
#else
uint32_t const *tmp = reinterpret_cast<uint32_t const *>(&A);
if (platform::is_same<ElementA, cutlass::half_t>::value) {
asm volatile(
"{\n\t"
" .reg .f16 low, high;\n\t"
" .reg .f32 tmp;\n\t"
" mov.b32 {low, high}, %2;\n\t"
" cvt.f32.f16 tmp, low;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" cvt.f32.f16 tmp, high;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" mov.b32 {low, high}, %3;\n\t"
" cvt.f32.f16 tmp, low;\n\t"
" add.f32 %1, tmp, %1;\n\t"
" cvt.f32.f16 tmp, high;\n\t"
" add.f32 %1, tmp, %1;\n\t"
" mov.b32 {low, high}, %4;\n\t"
" cvt.f32.f16 tmp, low;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" cvt.f32.f16 tmp, high;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" mov.b32 {low, high}, %5;\n\t"
" cvt.f32.f16 tmp, low;\n\t"
" add.f32 %1, tmp, %1;\n\t"
" cvt.f32.f16 tmp, high;\n\t"
" add.f32 %1, tmp, %1;\n\t"
"}\n\t"
: "+f"(gemm_k_reduction[m * 2]), "+f"(gemm_k_reduction[m * 2 + 1])
: "r"(tmp[m * 4]), "r"(tmp[m * 4 + 1]),"r"(tmp[m * 4 + 2]), "r"(tmp[m * 4 + 3]));
} else if (platform::is_same<ElementA, cutlass::bfloat16_t>::value) {
asm volatile(
"{\n\t"
" .reg .f32 tmp;\n\t"
" shl.b32 tmp, %2, 16;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" and.b32 tmp, %2, 0xffff0000;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" shl.b32 tmp, %3, 16;\n\t"
" add.f32 %1, tmp, %1;\n\t"
" and.b32 tmp, %3, 0xffff0000;\n\t"
" add.f32 %1, tmp, %1;\n\t"
" shl.b32 tmp, %4, 16;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" and.b32 tmp, %4, 0xffff0000;\n\t"
" add.f32 %0, tmp, %0;\n\t"
" shl.b32 tmp, %5, 16;\n\t"
" add.f32 %1, tmp, %1;\n\t"
" and.b32 tmp, %5, 0xffff0000;\n\t"
" add.f32 %1, tmp, %1;\n\t"
"}\n\t"
: "+f"(gemm_k_reduction[m * 2]), "+f"(gemm_k_reduction[m * 2 + 1])
: "r"(tmp[m * 4]), "r"(tmp[m * 4 + 1]),"r"(tmp[m * 4 + 2]), "r"(tmp[m * 4 + 3]));
} else {
assert(0);
}
#endif
}
}
}
#else
assert(0);
#endif
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
//
// Define conversions from source type to instruction type
//
FloatRoundStyle const kRoundA =
PreferredRoundingMode<typename ArchMmaOperator::ElementA,
ElementA>::kRound;
FloatRoundStyle const kRoundB =
PreferredRoundingMode<typename ArchMmaOperator::ElementB,
ElementB>::kRound;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)
detail::ConvertAndPack<typename ArchMmaOperator::ElementA, ElementA,
FragmentA::kElements, kRoundA>
convert_A;
NumericArrayConverter<typename ArchMmaOperator::ElementB, ElementB,
FragmentB::kElements / 2, kRoundB>
convert_B;
Array<ElementB, FragmentB::kElements / 2> const *ptr_B =
reinterpret_cast<Array<ElementB, FragmentB::kElements / 2> const *>(&B);
Array<typename ArchMmaOperator::ElementB, FragmentB::kElements / 2> *
ptr_dst_B = reinterpret_cast<Array<typename ArchMmaOperator::ElementB,
FragmentB::kElements / 2> *>(&dst_B);
dst_A = convert_A(A);
ptr_dst_B[0] = convert_B(ptr_B[0]);
ptr_dst_B[1] = convert_B(ptr_B[1]);
#elif defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
detail::ConvertAndPack<typename ArchMmaOperator::ElementA, ElementA,
FragmentA::kElements / 2, kRoundA>
convert_A;
NumericArrayConverter<typename ArchMmaOperator::ElementB, ElementB,
FragmentB::kElements, kRoundB>
convert_B;
Array<ElementA, FragmentA::kElements / 2> const *ptr_A =
reinterpret_cast<Array<ElementA, FragmentA::kElements / 2> const *>(&A);
Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> *
ptr_dst_A = reinterpret_cast<Array<typename ArchMmaOperator::ElementA,
FragmentA::kElements / 2> *>(&dst_A);
dst_B = convert_B(B);
ptr_dst_A[0] = convert_A(ptr_A[0]);
ptr_dst_A[1] = convert_A(ptr_A[1]);
#else
assert(0);
#endif
}
};
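// Illustrative usage sketch (not part of the library). Compared to a plain warp-level tensor
// op, operator() takes an extra FragmentReduction that accumulates per-thread K-dimension
// sums of operand A (when kReduceKForA is true) or operand B. The instantiation and the
// loaded fragments below are assumed to come from the enclosing threadblock mainloop.
//
//   using WarpMma = MmaWithReductionTensorOp<...>;   // some concrete instantiation (assumed)
//   WarpMma mma_op;
//   typename WarpMma::TransformedFragmentA frag_A;
//   typename WarpMma::TransformedFragmentB frag_B;
//   typename WarpMma::FragmentC accum;
//   typename WarpMma::FragmentReduction k_sums;
//   accum.clear();
//   k_sums.clear();
//   mma_op.transform(frag_A, frag_B, loaded_A, loaded_B);  // loaded_A / loaded_B: FragmentA / FragmentB
//   mma_op(accum, frag_A, frag_B, accum, k_sums);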
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/mma_with_reduction_tensor_op.h/0 | {
"file_path": "include/cutlass/gemm/warp/mma_with_reduction_tensor_op.h",
"repo_id": "include",
"token_count": 7946
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Defines layout functions for multiplicand operands of Volta (SM70) Tensor Core
    operations, expressed in terms of pitch-linear memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
#include "cutlass/layout/pitch_linear.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace layout {
// template <
// int ElementSize,
// gemm::Operand Operand
// >
// struct VoltaTensorOpMultiplicandCongruous;
// template <
// int ElementSize,
// gemm::Operand Operand
// >
// struct ColumnMajorVoltaTensorOpMultiplicandCongruous;
// template <
// int ElementSize,
// gemm::Operand Operand
// >
// struct RowMajorVoltaTensorOpMultiplicandCongruous;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template based on element size (in bits) - defined in terms of pitch-linear memory.
template <int ElementSize>
struct VoltaTensorOpMultiplicandCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
/// This layout is optimized for 128b accesses
static int const kAccessSize = 128;
/// Fundamental tile shape in units of vectors
using TileShape = PitchLinearShape<8, 4>;
/// Fundamental partition shape in units of vectors
using PartitionShape = PitchLinearShape<8, 2>;
//
// Static constants
//
static int const kElementSize = ElementSize;
static int const kElementsPerAccess = kAccessSize / kElementSize;
using PartitionCount = PitchLinearShape<
TileShape::kContiguous / PartitionShape::kContiguous,
TileShape::kStrided / PartitionShape::kStrided
>;
using AccessCount = PitchLinearShape<
PartitionShape::kContiguous,
PartitionShape::kStrided
>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandCongruous(Index ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandCongruous(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static VoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) {
return VoltaTensorOpMultiplicandCongruous(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
// First, compute c and s of vector within source (in units of vector accesses)
int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess;
int vec_strided_idx = coord.strided();
// Compute the fundamental tile being accessed
int tile_contiguous_idx = vec_contiguous_idx / TileShape::kContiguous;
int tile_strided_idx = vec_strided_idx / TileShape::kStrided;
int tile_contiguous_residual = vec_contiguous_idx % TileShape::kContiguous;
int tile_strided_residual = vec_strided_idx % TileShape::kStrided;
// Then swizzle in a tile
// Swizzle pattern is (tid[2:0] << 2)|(tid[4:3] ^ tid[2:1])
int permuted_strided_within_tile = (tile_contiguous_residual >> 1);
int permuted_contiguous_within_tile = (tile_strided_residual ^ permuted_strided_within_tile) |
((tile_contiguous_residual & 1) << 2);
// Compute final element location
int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous +
permuted_contiguous_within_tile) * kElementsPerAccess + (coord.contiguous() % kElementsPerAccess);
int element_strided = tile_strided_idx * TileShape::kStrided + permuted_strided_within_tile;
return element_contiguous + element_strided * stride_[0];
}
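  // Worked example of the mapping above (illustrative, assuming ElementSize == 16 so that
  // kElementsPerAccess == 8, and stride_[0] == 64): for the coordinate
  // (contiguous, strided) = (24, 3), the vector indices are (3, 3) and the tile indices are
  // (0, 0) with residuals (3, 3). The permuted strided offset within the tile is 3 >> 1 = 1
  // and the permuted contiguous offset is (3 ^ 1) | ((3 & 1) << 2) = 6, giving
  //   element_contiguous = 6 * 8 + 0 = 48,  element_strided = 1,
  //   offset = 48 + 1 * 64 = 112.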
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[1] * stride_[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template mapping a column-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous
template <int ElementSize>
struct ColumnMajorVoltaTensorOpMultiplicandCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandCongruous<ElementSize>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorVoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) {
return ColumnMajorVoltaTensorOpMultiplicandCongruous(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.contiguous(), coord.strided());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
/// Template mapping a row-major view of pitch-linear memory to VoltaTensorOpMultiplicandCongruous
template <int ElementSize>
struct RowMajorVoltaTensorOpMultiplicandCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandCongruous<ElementSize>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorVoltaTensorOpMultiplicandCongruous packed(TensorCoord const &extent) {
return RowMajorVoltaTensorOpMultiplicandCongruous(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.strided(), coord.contiguous());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
/// Template based on element size (in bits) - defined in terms of pitch-linear memory.
// template <int ElementSize, Operand Operand>
template <int ElementSize>
struct VoltaTensorOpMultiplicandBCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
/// This layout is optimized for 128b accesses
static int const kAccessSize = 128;
/// Fundamental tile shape in units of vectors
using TileShape = PitchLinearShape<8, 4>;
/// Fundamental partition shape in units of vectors
using PartitionShape = PitchLinearShape<4, 4>;
//
// Static constants
//
static int const kElementSize = ElementSize;
static int const kElementsPerAccess = kAccessSize / kElementSize;
using PartitionCount = PitchLinearShape<
TileShape::kContiguous / PartitionShape::kContiguous,
TileShape::kStrided / PartitionShape::kStrided
>;
using AccessCount = PitchLinearShape<
PartitionShape::kContiguous,
PartitionShape::kStrided
>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandBCongruous(Index ldm = 0): stride_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandBCongruous(Stride stride): stride_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static VoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) {
return VoltaTensorOpMultiplicandBCongruous(extent[0]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
// First, compute c and s of vector within source (in units of vector accesses)
int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess;
int vec_strided_idx = coord.strided();
// Compute the fundamental tile being accessed
int tile_contiguous_idx = vec_contiguous_idx / TileShape::kContiguous;
int tile_strided_idx = vec_strided_idx / TileShape::kStrided;
int tile_contiguous_residual = vec_contiguous_idx % TileShape::kContiguous;
int tile_strided_residual = vec_strided_idx % TileShape::kStrided;
// Then swizzle in a tile
// Swizzle pattern is (tid[1:0] << 3)|(tid & 0x4)|(tid[1:0])
int permuted_strided_within_tile = (tile_contiguous_residual & 0x3);
int permuted_contiguous_within_tile = (tile_strided_residual ^ permuted_strided_within_tile) |
(tile_contiguous_residual & 0x4);
// Compute final element location
int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous +
permuted_contiguous_within_tile) * kElementsPerAccess + (coord.contiguous() % kElementsPerAccess);
int element_strided = tile_strided_idx * TileShape::kStrided + permuted_strided_within_tile;
return element_contiguous + element_strided * stride_[0];
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[1] * stride_[0];
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template mapping a column-major view of pitch-linear memory to VoltaTensorOpMultiplicandBCongruous
template <int ElementSize>
struct ColumnMajorVoltaTensorOpMultiplicandBCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandBCongruous<ElementSize>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandBCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandBCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorVoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) {
return ColumnMajorVoltaTensorOpMultiplicandBCongruous(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.contiguous(), coord.strided());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
/// Template mapping a row-major view of pitch-linear memory to VoltaTensorOpMultiplicandBCongruous
template <int ElementSize>
struct RowMajorVoltaTensorOpMultiplicandBCongruous {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandBCongruous<ElementSize>;
/// This layout is optimized for 128b accesses
static int const kAccessSize = Base::kAccessSize;
using TileShape = typename Base::TileShape;
using PartitionShape = typename Base::PartitionShape;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
using PartitionCount = typename Base::PartitionCount;
using AccessCount = typename Base::AccessCount;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandBCongruous(Index ldm = 0): layout_(ldm) { }
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandBCongruous(Stride stride): layout_(stride) { }
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorVoltaTensorOpMultiplicandBCongruous packed(TensorCoord const &extent) {
return RowMajorVoltaTensorOpMultiplicandBCongruous(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.strided(), coord.contiguous());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
/// Template based on element size (in bits) - defined in terms of pitch-linear
/// memory and KBlock size (in elements).
template <int ElementSize, int KBlock>
struct VoltaTensorOpMultiplicandCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = PitchLinearCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
/// This layout is optimized for 64b accesses
static int const kAccessSize = 64;
//
// Static constants
//
static int const kElementSize = ElementSize;
static int const kElementsPerAccess = kAccessSize / kElementSize;
static int const kKBlock = KBlock;
private:
//
// Data members
//
/// Stride data member. For GEMM, it equals to KBlock x stage.
Stride stride_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : stride_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
VoltaTensorOpMultiplicandCrosswise(Stride stride) : stride_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static VoltaTensorOpMultiplicandCrosswise packed(TensorCoord const &extent) {
return VoltaTensorOpMultiplicandCrosswise(extent[1]);
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
//
// First, compute c and s of vector within source (in units of vector
// accesses)
//
int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess;
int vec_strided_idx = coord.strided();
//
// Then swizzle
    // The mapping is:
// id[1:0]|(id[3]^id[4])|id[2]
int vec_strided_within_tile = vec_contiguous_idx & 0x7;
int permuted_vec_contiguous =
(vec_strided_idx & (~0xF)) + (vec_strided_idx & 0x3) * 4 +
(((vec_strided_idx >> 2) ^ ((vec_strided_idx & 0x10) >> 3)) & 0x3);
permuted_vec_contiguous ^= ((vec_strided_within_tile >> 1) & 0x3);
int permuted_vec_strided = vec_contiguous_idx;
//
// Compute final element location
//
int element_contiguous = permuted_vec_contiguous * kElementsPerAccess +
(coord.contiguous() % kElementsPerAccess);
return element_contiguous + permuted_vec_strided * (stride_[0] * kElementsPerAccess);
}
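  // Worked example of the mapping above (illustrative, assuming ElementSize == 16 so that
  // kElementsPerAccess == 4, and stride_[0] == 32): for the coordinate
  // (contiguous, strided) = (4, 5), the vector indices are (1, 5). Then
  //   permuted_vec_contiguous = 0 + 1 * 4 + ((1 ^ 0) & 0x3) = 5 (unchanged by the final XOR),
  //   permuted_vec_strided    = 1,
  //   element_contiguous      = 5 * 4 + 0 = 20,
  //   offset                  = 20 + 1 * (32 * 4) = 148.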
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return stride_; }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return stride_; }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return extent[0] * stride_[0];
}
};
/// Template mapping a column-major view of pitch-linear memory to
/// VoltaTensorOpMultiplicandCrosswise
template <int ElementSize, int KBlock>
struct ColumnMajorVoltaTensorOpMultiplicandCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandCrosswise<ElementSize, KBlock>;
/// This layout is optimized for 64b accesses
static int const kAccessSize = Base::kAccessSize;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
ColumnMajorVoltaTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static ColumnMajorVoltaTensorOpMultiplicandCrosswise packed(
TensorCoord const &extent) {
return ColumnMajorVoltaTensorOpMultiplicandCrosswise(extent.column());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.row(), coord.column()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.contiguous(), coord.strided());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return layout_.stride(); }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return layout_.stride(); }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.row(), extent.column()));
}
};
/// Template mapping a row-major view of pitch-linear memory to
/// VoltaTensorOpMultiplicandCrosswise
template <int ElementSize, int KBlock>
struct RowMajorVoltaTensorOpMultiplicandCrosswise {
/// Logical rank of tensor
static int const kRank = 2;
/// Rank of stride vector
static int const kStrideRank = 1;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = MatrixCoord;
/// Stride vector
using Stride = Coord<kStrideRank, Index, LongIndex>;
//
// Invariants
//
using Base = VoltaTensorOpMultiplicandCrosswise<ElementSize, KBlock>;
/// This layout is optimized for 64b accesses
static int const kAccessSize = Base::kAccessSize;
//
// Static constants
//
static int const kElementSize = Base::kElementSize;
static int const kElementsPerAccess = Base::kElementsPerAccess;
private:
//
// Data members
//
Base layout_;
public:
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {}
/// Ctor
CUTLASS_HOST_DEVICE
RowMajorVoltaTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {}
/// Helper returns a layout to a tightly packed tensor
CUTLASS_HOST_DEVICE
static RowMajorVoltaTensorOpMultiplicandCrosswise packed(
TensorCoord const &extent) {
return RowMajorVoltaTensorOpMultiplicandCrosswise(extent.row());
}
/// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided)
CUTLASS_HOST_DEVICE
LongIndex operator()(TensorCoord const &coord) const {
return layout_(PitchLinearCoord(coord.column(), coord.row()));
}
/// Inverse of layout function, mapping linear offset to logical coordinate
CUTLASS_HOST_DEVICE
TensorCoord inverse(LongIndex offset) const {
PitchLinearCoord coord = layout_.inverse(offset);
return MatrixCoord(coord.strided(), coord.contiguous());
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const { return layout_.stride(); }
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride &stride() { return layout_.stride(); }
/// Compute the number of contiguous elements needed to store a tensor with
/// the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &extent) const {
return layout_.capacity(PitchLinearCoord(extent.column(), extent.row()));
}
};
} // namespace layout
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/layout/tensor_op_multiplicand_sm70.h/0 | {
"file_path": "include/cutlass/layout/tensor_op_multiplicand_sm70.h",
"repo_id": "include",
"token_count": 9595
} | 39 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a structure containing strides, bounds, and a pointer to tensor data.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
#include "cutlass/platform/platform.h"
#include "cutlass/subbyte_reference.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Default layout function from coordinates in a tensor's index space into the n-D array held
/// in memory.
///
/// All layout functions must define at least the members shown in IdentityTensorLayout<>.
template <int Rank>
class IdentityTensorLayout {
public:
/// Logical rank of tensor
static int const kRank = Rank;
/// Rank of stride vector
static int const kStrideRank = Rank;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Coord<kRank, Index>;
/// Stride vector
using Stride = Coord<kStrideRank, Index>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
CUTLASS_HOST_DEVICE
IdentityTensorLayout(Stride const &stride = Stride()): stride_(stride) { }
/// Returns the offset of a coordinate in linear memory
CUTLASS_HOST_DEVICE
LongIndex operator()(Coord<Rank> const &coord) const {
return coord.dot(stride_);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &size) const {
int idx = stride_.max_dim_index();
return stride_[idx] * size[idx];
}
};
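// For example (illustrative): with Rank == 2 and stride {1, ld} (ld > 1), a coordinate {i, j}
// maps to the linear offset i + j * ld, and capacity({M, N}) returns ld * N, which is the
// storage needed for a column-major M-by-N matrix when ld >= M.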
///////////////////////////////////////////////////////////////////////////////////////////////////
/* \brief TensorRef is a template for objects pointing to the start of tensors of arbitrary rank
and layout within memory. A TensorRef combines a pointer and a Layout concept
Examples:
(These examples use helpers for matrix layouts defined in cutlass/layout/matrix.h)
1. Column-major matrix may be represented as a rank=2 tensor:
TensorRef<float, layout::ColumnMajor> A(ptr_A, ldm);
2. Row-major matrix may be represented as a rank=2 tensor:
      TensorRef<float, layout::RowMajor> B(ptr_B, ldm);
3. An interleaved matrix may be represented as a rank=2 tensor:
TensorRef<int8_t, layout::ColumnMajorInterleaved<32> > C;
4. A helper exists to define a TensorRef for a contiguous matrix whose layout
is not known at compile time.
int ldm; // leading dimension
layout::Matrix kind; // Could be layout::Matrix::kRowMajor or layout::Matrix::kColumnMajor
TensorRef<int, layout::ContiguousMatrix> E(ptr_E, {ldm, kind});
*/
template <
/// Data type of element stored within tensor (concept: NumericType)
typename Element_,
/// Defines a mapping from logical coordinate to linear memory (concept: Layout)
typename Layout_
>
class TensorRef {
public:
/// Data type of individual access
using Element = Element_;
/// Mapping function from logical coordinate to linear memory
using Layout = Layout_;
/// Reference type to an element
using Reference = typename platform::conditional<
sizeof_bits<Element>::value >= 8,
Element &,
SubbyteReference<Element>
>::type;
/// Logical rank of tensor index space
static int const kRank = Layout::kRank;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Layout's stride vector
using Stride = typename Layout::Stride;
/// TensorRef to constant data
using ConstTensorRef = TensorRef<
typename platform::remove_const<Element>::type const,
Layout>;
/// TensorRef to non-constant data
using NonConstTensorRef = TensorRef<
typename platform::remove_const<Element>::type,
Layout>;
/// Require at least rank=1. Mathematically, a rank=0 tensor would be considered to be a
/// scalar, but degenerate cases such as these are difficult to accommodate without
/// extensive C++ metaprogramming or support for zero-length arrays.
static_assert(kRank > 0, "Cannot define a zero-rank TensorRef");
private:
/// Pointer
Element* ptr_;
/// Layout object maps logical coordinates to linear offsets
Layout layout_;
public:
//
// Methods
//
/// Constructs a TensorRef with a pointer and layout object.
CUTLASS_HOST_DEVICE
TensorRef(): ptr_(nullptr) {
}
/// Constructs a TensorRef with a pointer and layout object.
CUTLASS_HOST_DEVICE
TensorRef(
Element *ptr, ///< pointer to start of tensor
Layout const &layout ///< layout object containing stride and mapping function
):
ptr_(ptr), layout_(layout) {
}
/// Converting constructor from TensorRef to non-constant data.
template<typename _Magic = int>
CUTLASS_HOST_DEVICE
TensorRef(
NonConstTensorRef const &ref, ///< TensorRef to non-const data
///SFINAE trick to avoid creating a copy-constructor when Element_ is already non-const
_Magic magic = (typename platform::enable_if< ! platform::is_same<NonConstTensorRef, TensorRef<Element_, Layout_> >::value, _Magic>::type)0
):
ptr_(ref.data()), layout_(ref.layout()) { }
/// Returns a reference to constant-valued tensor.
CUTLASS_HOST_DEVICE
ConstTensorRef const_ref() const {
return ConstTensorRef(ptr_, layout_);
}
CUTLASS_HOST_DEVICE
NonConstTensorRef non_const_ref() const {
return NonConstTensorRef(const_cast<typename platform::remove_const<Element>::type *>(ptr_), layout_);
}
/// Updates only the pointer
CUTLASS_HOST_DEVICE
void reset(Element* ptr = nullptr) {
ptr_ = ptr;
}
/// Updates the pointer and layout object
CUTLASS_HOST_DEVICE
void reset(Element* ptr, Layout const &layout) {
ptr_ = ptr;
layout_ = layout;
}
/// Returns true if the TensorRef is non-null
CUTLASS_HOST_DEVICE
bool good() const {
return ptr_ != nullptr;
}
/// Returns the pointer to referenced data
CUTLASS_HOST_DEVICE
Element * data() const { return ptr_; }
/// Returns a reference to the element at a given linear index
CUTLASS_HOST_DEVICE
Reference data(LongIndex idx) const {
return ReferenceFactory<typename platform::remove_const<Element>::type,
(sizeof_bits<Element>::value < 8)>::get(ptr_, idx);
}
/// Returns the layout object
CUTLASS_HOST_DEVICE
Layout & layout() {
return layout_;
}
/// Returns the layout object
CUTLASS_HOST_DEVICE
Layout layout() const {
return layout_;
}
/// Returns the layout object's stride vector
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the layout object's stride vector
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Returns the layout object's stride in a given physical dimension
CUTLASS_HOST_DEVICE
typename Layout::Stride::Index stride(int dim) const {
return layout_.stride().at(dim);
}
/// Returns the layout object's stride in a given physical dimension
CUTLASS_HOST_DEVICE
typename Layout::Stride::Index & stride(int dim) {
return layout_.stride().at(dim);
}
/// Computes the offset of an index from the origin of the tensor
CUTLASS_HOST_DEVICE
LongIndex offset(TensorCoord const& coord) const {
return layout_(coord);
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference at(TensorCoord const& coord) const {
return data(offset(coord));
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference operator[](TensorCoord const& coord) const {
return data(offset(coord));
}
/// Adds an offset to each pointer
CUTLASS_HOST_DEVICE
TensorRef & add_pointer_offset(LongIndex offset_) {
ptr_ += offset_;
return *this;
}
/// Adds an offset to each pointer
CUTLASS_HOST_DEVICE
TensorRef & add_coord_offset(TensorCoord const &coord) {
add_pointer_offset(offset(coord));
return *this;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRef operator+(TensorCoord const& b) const {
TensorRef result(*this);
result.add_coord_offset(b);
return result;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRef & operator+=(TensorCoord const& b) {
add_coord_offset(b);
return *this;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRef operator-(TensorCoord const& b) const {
TensorRef result(*this);
result.add_pointer_offset(-offset(b));
return result;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRef & operator-=(TensorCoord const& b) {
add_pointer_offset(-offset(b));
return *this;
}
};
/// Constructs a TensorRef, deducing types from arguments.
template <
typename Element,
typename Layout
>
CUTLASS_HOST_DEVICE
TensorRef<Element, Layout> make_TensorRef(Element *ptr, Layout const &layout) {
return TensorRef<Element, Layout>(ptr, layout);
}
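// For example (illustrative), deducing the TensorRef type from the arguments; the pointer and
// leading dimension are assumed to come from the caller, and layout::ColumnMajor is defined in
// cutlass/layout/matrix.h:
//
//   float *ptr = ...;
//   int ldm = ...;
//   auto ref = make_TensorRef(ptr, layout::ColumnMajor(ldm));
//   ref.at({row, col}) = 1.0f;     // element access through the layout function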
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations to handle degenerate and sub-byte cases.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Element,
typename Layout
>
CUTLASS_HOST_DEVICE
bool TensorRef_aligned(TensorRef<Element, Layout> const &ref, int alignment) {
int const kStrideRank = Layout::kStrideRank;
if (reinterpret_cast<uintptr_t>(ref.data()) % alignment) {
return false;
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStrideRank; ++i) {
if (ref.stride(i) % alignment) {
return false;
}
}
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| include/cutlass/tensor_ref.h/0 | {
"file_path": "include/cutlass/tensor_ref.h",
"repo_id": "include",
"token_count": 3815
} | 40 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Templates computing the addresses and predicates for loading scale and bias vectors.
    This iterator uses masks to guard out-of-bounds accesses.
    It can be used to load the gamma and beta vectors of layernorm, which are loop-variant.
A precomputed "Params" object minimizes the amount of state that must be
stored in registers, and integer addition is used to advance the pointer
through memory.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/conv/threadblock/conv2d_params.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedScaleBiasVectorAccessIterator
///
template <typename ThreadblockShape,
typename Element,
typename Layout>
class PredicatedScaleBiasVectorAccessIterator;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorAccessIterator for fprop pitch-linear data.
///
template <typename ThreadblockShape_, typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::PitchLinear> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::PitchLinear;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
static int const kThreads = ThreadblockShape::kContiguous / kElementsPerAccess;
using AccessType = AlignedArray<Element, kElementsPerAccess>;
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Internal pointer to first access of tile
BytePointer pointer_;
TensorCoord thread_offset_;
int problem_size_k_;
/// Used for out-of-order visitation
bool is_residue_tile_;
bool guard_;
TensorCoord::Index residue_size_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Extent of tensor
int problem_size_k,
/// Pointer to the start of the scale vector
ConstPointer scale_pointer,
/// Pointer to the start of the bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset) {
pointer_ = (thread_id < kThreads)
? reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(scale_pointer))
: reinterpret_cast<BytePointer>(
const_cast<NonConstPointer>(bias_pointer));
// Per-thread offset in logical coordinates of tensor
int thread_base = (thread_id < kThreads) ? 0 : kThreads;
problem_size_k_ = problem_size_k;
is_residue_tile_ = true;
residue_size_ = (problem_size_k_ - threadblock_offset.contiguous()) % ThreadblockShape::kContiguous;
if (residue_size_ == 0) {
residue_size_ = ThreadblockShape::kContiguous;
}
guard_ = ((thread_id - thread_base) * kElementsPerAccess) < residue_size_;
thread_offset_ =
threadblock_offset +
TensorCoord((thread_id - thread_base) * kElementsPerAccess, 0);
set_iteration_index(0);
}
/// Construct a PredicatedScaleBiasVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
/// Extent of tensor
int problem_size_k,
/// Pointer to start of scale vector
ConstPointer scale_pointer,
/// Pointer to start of bias vector
ConstPointer bias_pointer,
/// ID of each participating thread
int thread_id)
: PredicatedScaleBiasVectorAccessIterator(problem_size_k,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) {}
/// Advances an iterator along logical dimensions of matrix in units of whole threadblock tiles
CUTLASS_DEVICE
void add_tile_offset(
TensorCoord const &tile_offset) {
guard_ = threadIdx.x < kThreads * 2;
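// The first advance consumes the (possibly partial) residue tile computed in the
// constructor; subsequent advances step by whole threadblock tiles.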
TensorCoord offset = is_residue_tile_ ?
TensorCoord(residue_size_ + ThreadblockShape::kContiguous * (tile_offset.contiguous() - 1), 0)
: TensorCoord(ThreadblockShape::kContiguous * tile_offset.contiguous(), 0);
thread_offset_ =
thread_offset_ +
offset;
is_residue_tile_ = false;
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(
pointer_ +
(thread_offset_.contiguous() * sizeof_bits<Element>::value / 8));
}
/// Increment and return a reference to self.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
return *this;
}
/// Increment and return a copy of the prior state (post-increment).
CUTLASS_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
guard_ &= (!enable);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return guard_;
}
};
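////////////////////////////////////////////////////////////////////////////////
//
// Usage sketch (illustrative only; the tile shape, element type, and pointer
// names below are assumptions, not part of this header):
//
//   using Iterator = PredicatedScaleBiasVectorAccessIterator<
//       cutlass::layout::PitchLinearShape<128, 8>,
//       cutlass::half_t,
//       cutlass::layout::PitchLinear>;
//
//   Iterator iter(problem_size_k, scale_ptr, bias_ptr, thread_id,
//                 {threadblock_offset_k, 0});
//
//   for (int iter_k = 0; iter_k < gemm_k_iterations; ++iter_k) {
//     if (iter.valid()) {
//       Iterator::AccessType frag = *iter.get();   // one 128-bit vector access
//     }
//     iter.add_tile_offset({1, 0});  // advance one threadblock tile along K
//   }
//
////////////////////////////////////////////////////////////////////////////////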
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedScaleBiasVectorAccessIterator for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename ThreadblockShape_,
typename Element_>
class PredicatedScaleBiasVectorAccessIterator<ThreadblockShape_,
Element_,
layout::RowMajor> {
public:
using ThreadblockShape = ThreadblockShape_;
using Element = Element_;
using Layout = layout::RowMajor;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using ConstPointer = const Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedScaleBiasVectorAccessIterator<
layout::PitchLinearShape<ThreadblockShape::kColumn, ThreadblockShape::kRow>,
Element,
layout::PitchLinear>;
using AccessType = typename UnderlyingIterator::AccessType;
static int const kElementsPerAccess = UnderlyingIterator::kElementsPerAccess;
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
///< Extent of tensor
int problem_size_k,
///< Pointer to the start of the scale vector
ConstPointer scale_pointer,
///< Pointer to the start of the bias vector
ConstPointer bias_pointer,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const &threadblock_offset)
: iterator_(problem_size_k, scale_pointer, bias_pointer,
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(),
threadblock_offset.row())) {}
/// Construct a PredicatedScaleBiasVectorAccessIterator with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator(
int problem_size_k, ///< Extent of tensor
ConstPointer scale_pointer, ///< Pointer to the start of the scale vector
ConstPointer bias_pointer, ///< Pointer to the start of the bias vector
int thread_id ///< ID of each participating thread
)
: PredicatedScaleBiasVectorAccessIterator(problem_size_k,
scale_pointer, bias_pointer,
thread_id, make_Coord(0, 0)) {}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// threadblock tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}
/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType *get() const {
return reinterpret_cast<AccessType *>(iterator_.get());
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedScaleBiasVectorAccessIterator operator++(int) {
PredicatedScaleBiasVectorAccessIterator self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() {
return iterator_.valid();
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/threadblock/predicated_scale_bias_vector_access_iterator.h/0 | {
"file_path": "include/cutlass/transform/threadblock/predicated_scale_bias_vector_access_iterator.h",
"repo_id": "include",
"token_count": 4311
} | 41 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import ctypes
from cutlass_library import (
DataType,
KernelScheduleType,
TileSchedulerType
)
from cutlass.backend.library import DataTypeSizeBytes
class GemmCoord_(ctypes.Structure):
_fields_ = [
("m", ctypes.c_int),
("n", ctypes.c_int),
("k", ctypes.c_int)
]
def __init__(self, m, n, k) -> None:
self.m = m
self.n = n
self.k = k
class GemmCoordBatched_(ctypes.Structure):
"""
Wrapper around a GemmCoord that also contains batch count. This is used for encoding
batched GEMM inputs to CUTLASS 3 GEMMs.
"""
_fields_ = [
("m", ctypes.c_int),
("n", ctypes.c_int),
("k", ctypes.c_int),
("batch_count", ctypes.c_int)
]
def __init__(self, gemm_coord, batch_count) -> None:
self.m = gemm_coord.m
self.n = gemm_coord.n
self.k = gemm_coord.k
self.batch_count = batch_count
class MatrixCoord_(ctypes.Structure):
_fields_ = [
("row", ctypes.c_int),
("column", ctypes.c_int)
]
class dim3_(ctypes.Structure):
_fields_ = [
("x", ctypes.c_int),
("y", ctypes.c_int),
("z", ctypes.c_int)
]
class StrideBatched_(ctypes.Structure):
"""
CUTLASS 3.x strides for operands contain one static (unit) dimension and two variable dimensions. The
variable dimensions represent the stride along the non-unit-stride dimension of the row/column-major
layout and the batch stride. This structure encodes the two variable dimensions.
"""
_fields_ = [
("major_stride", ctypes.c_int64),
("batch_stride", ctypes.c_int64)
]
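# Example (illustrative): for a row-major M x K operand repeated L times in a batch,
# the corresponding cute-style stride is roughly (K, _1, M * K), which this structure
# encodes as major_stride = K and batch_stride = M * K.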
class GenericMainloopArguments3x_(ctypes.Structure):
"""
Structure representing the superset of possible mainloop arguments.
This structure should not be passed to kernels directly, but, rather,
be used as an input to one of the more specific schedule arguments, which
will each select those arguments relevant to the particular schedule.
"""
_fields_ = [
("ptr_A", ctypes.c_void_p),
("stride_A", StrideBatched_),
("ptr_B", ctypes.c_void_p),
("stride_B", StrideBatched_),
("mma_promotion_interval", ctypes.c_int)
]
class _PersistentTileSchedulerArguments(ctypes.Structure):
_fields_ = [
("max_swizzle_size", ctypes.c_int),
("raster_order_option", ctypes.c_int),
]
class _PersistentTileSchedulerStreamKArguments(ctypes.Structure):
_fields_ = [
("splits", ctypes.c_int),
("max_swizzle_size", ctypes.c_int),
("raster_order_option", ctypes.c_int),
("reduction_mode", ctypes.c_int),
("decomposition_mode", ctypes.c_int),
]
def get_tile_scheduler_arguments_3x(
tile_scheduler: TileSchedulerType,
splits: int = 1):
max_swizzle_size = 1
raster_order_option = 0 # Heuristic
if tile_scheduler == TileSchedulerType.Persistent:
return _PersistentTileSchedulerArguments(
max_swizzle_size,
raster_order_option,
)
elif tile_scheduler == TileSchedulerType.StreamK:
reduction_mode = 0 # Deterministic
decomposition_mode = 0 # Heuristic
return _PersistentTileSchedulerStreamKArguments(
splits,
max_swizzle_size,
raster_order_option,
reduction_mode,
decomposition_mode,
)
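# Illustrative usage (a sketch; the values shown mirror the defaults encoded above):
#
#   args = get_tile_scheduler_arguments_3x(TileSchedulerType.StreamK, splits=2)
#   # -> _PersistentTileSchedulerStreamKArguments(splits=2, max_swizzle_size=1,
#   #    raster_order_option=0 (heuristic), reduction_mode=0 (deterministic),
#   #    decomposition_mode=0 (heuristic))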
def get_mainloop_arguments_3x(
kernel_schedule: KernelScheduleType,
element_A,
element_B,
alignment_A: int,
alignment_B: int) -> ctypes.Structure:
"""
Returns the ctypes structure to be used for the 3.x kernel's mainloop parameters.
:param kernel_schedule: type of kernel schedule to be used in the mainloop
:type kernel_schedule: cutlass_library.KernelScheduleType
:param element_A: data type of operand A
:param element_B: data type of operand B
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:returns: ctypes structure to be used for the 3.x kernel's mainloop parameters
:rtype: ctypes.Structure
"""
class _MainloopArgumentsTma(ctypes.Structure):
_fields_ = [
("ptr_A", ctypes.c_void_p),
("stride_A", StrideBatched_),
("ptr_B", ctypes.c_void_p),
("stride_B", StrideBatched_),
("mma_promotion_interval", ctypes.c_int)
]
@staticmethod
def from_generic_mainloop_args(args: GenericMainloopArguments3x_):
return _MainloopArgumentsTma(
args.ptr_A, args.stride_A, args.ptr_B, args.stride_B,
args.mma_promotion_interval
)
class _MainloopArgumentsMultistage(ctypes.Structure):
_fields_ = [
("ptr_A", ctypes.c_void_p),
("stride_A", StrideBatched_),
("ptr_B", ctypes.c_void_p),
("stride_B", StrideBatched_),
]
@staticmethod
def from_generic_mainloop_args(args: GenericMainloopArguments3x_):
return _MainloopArgumentsMultistage(
args.ptr_A, args.stride_A, args.ptr_B, args.stride_B,
)
# Currently all 3.x kernels (CpAsync and Tma) have the same argument structure.
# Should that become not the case, this is the place to return custom ctypes
# structures based on selected kernel schedule.
return _MainloopArgumentsTma
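# Illustrative usage (a sketch; the schedule member name and the pointer/stride
# values are placeholders, not prescribed by this module):
#
#   MainloopArgs = get_mainloop_arguments_3x(
#       kernel_schedule, DataType.f16, DataType.f16, alignment_A=8, alignment_B=8)
#   generic = GenericMainloopArguments3x_(ptr_A, stride_A, ptr_B, stride_B, 4)
#   mainloop = MainloopArgs.from_generic_mainloop_args(generic)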
def get_gemm_arguments_3x(mainloop_arguments, epilogue_functor, scheduler_args, default_epilogue):
if not default_epilogue and hasattr(epilogue_functor, "epilogue_type_evt"):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type_evt
else:
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
if hasattr(epilogue_functor, "visitor"):
class _EpilogueArguments(ctypes.Structure):
_fields_ = [
("epilogue", _EpilogueOutputOpParams),
("arg_C", epilogue_functor.arg_c_type),
("arg_D", epilogue_functor.arg_d_type)
]
def __init__(self, output_op, ptr_c, stride_c, ptr_d, stride_d) -> None:
self.epilogue = output_op
self.arg_C = epilogue_functor.arg_c_type(ptr_c)
self.arg_D = epilogue_functor.arg_d_type(ptr_d)
else:
class _EpilogueArguments(ctypes.Structure):
_fields_ = [
("epilogue", _EpilogueOutputOpParams),
("ptr_C", ctypes.c_void_p),
("stride_C", StrideBatched_),
("ptr_D", ctypes.c_void_p),
("stride_D", StrideBatched_),
]
class _HardwareInfo(ctypes.Structure):
_fields_ = [
("device_id", ctypes.c_int),
("sm_count", ctypes.c_int),
]
class _GemmArguments(ctypes.Structure):
_fields_ = [
("mode", ctypes.c_int),
("problem_size", GemmCoordBatched_),
("mainloop", mainloop_arguments),
("epilogue", _EpilogueArguments),
("hw_info", _HardwareInfo),
("scheduler", type(scheduler_args)),
]
return _GemmArguments, _EpilogueArguments, _EpilogueOutputOpParams, _HardwareInfo
def get_gemm_arguments(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _GemmArguments(ctypes.Structure):
_fields_ = [
# Arguments from UniversalArgumentsBase
("mode", ctypes.c_int),
("problem_size", GemmCoord_),
("batch_count", ctypes.c_int),
("batch_stride_D", ctypes.c_longlong),
# Remaining arguments
("epilogue", _EpilogueOutputOpParams),
("ptr_A", ctypes.c_void_p),
("ptr_B", ctypes.c_void_p),
("ptr_C", ctypes.c_void_p),
("ptr_D", ctypes.c_void_p),
("batch_stride_A", ctypes.c_longlong),
("batch_stride_B", ctypes.c_longlong),
("batch_stride_C", ctypes.c_longlong),
("stride_a", ctypes.c_longlong),
("stride_b", ctypes.c_longlong),
("stride_c", ctypes.c_longlong),
("stride_d", ctypes.c_longlong),
("lda", ctypes.c_longlong),
("ldb", ctypes.c_longlong),
("ldc", ctypes.c_longlong),
("ldd", ctypes.c_longlong),
("ptr_gather_A_indices", ctypes.c_void_p),
("ptr_gather_B_indices", ctypes.c_void_p),
("ptr_scatter_D_indices", ctypes.c_void_p)
]
return _GemmArguments, _EpilogueOutputOpParams
def get_gemm_arguments_streamk(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _GemmArguments(ctypes.Structure):
_fields_ = [
("mode", ctypes.c_int),
("problem_size", GemmCoord_),
("batch_count", ctypes.c_int),
("epilogue", _EpilogueOutputOpParams),
("ptr_A", ctypes.c_void_p),
("ptr_B", ctypes.c_void_p),
("ptr_C", ctypes.c_void_p),
("ptr_D", ctypes.c_void_p),
("batch_stride_A", ctypes.c_longlong),
("batch_stride_B", ctypes.c_longlong),
("batch_stride_C", ctypes.c_longlong),
("batch_stride_D", ctypes.c_longlong),
("stride_a", ctypes.c_longlong),
("stride_b", ctypes.c_longlong),
("stride_c", ctypes.c_longlong),
("stride_d", ctypes.c_longlong),
("lda", ctypes.c_longlong),
("ldb", ctypes.c_longlong),
("ldc", ctypes.c_longlong),
("ldd", ctypes.c_longlong),
("avail_sms", ctypes.c_int)
]
return _GemmArguments, _EpilogueOutputOpParams
###########################################################################################
# GEMM Grouped
###########################################################################################
def get_gemm_grouped_arguments(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _GEMMGroupedArguments(ctypes.Structure):
_fields_ = [
("problem_sizes", ctypes.c_void_p),
("problem_count", ctypes.c_int),
("threadblock_count", ctypes.c_int),
("output_op", _EpilogueOutputOpParams),
("ptr_A", ctypes.c_void_p),
("ptr_B", ctypes.c_void_p),
("ptr_C", ctypes.c_void_p),
("ptr_D", ctypes.c_void_p),
("lda", ctypes.c_void_p),
("ldb", ctypes.c_void_p),
("ldc", ctypes.c_void_p),
("ldd", ctypes.c_void_p),
("host_problem_sizes", ctypes.c_void_p)
]
return _GEMMGroupedArguments, _EpilogueOutputOpParams
############################################################################################
# Convolution2D
############################################################################################
class Conv2DProblemSize_(ctypes.Structure):
_fields_ = [
("N", ctypes.c_int),
("H", ctypes.c_int),
("W", ctypes.c_int),
("C", ctypes.c_int),
("P", ctypes.c_int),
("Q", ctypes.c_int),
("K", ctypes.c_int),
("R", ctypes.c_int),
("S", ctypes.c_int),
("pad_h", ctypes.c_int),
("pad_w", ctypes.c_int),
("stride_h", ctypes.c_int),
("stride_w", ctypes.c_int),
("dilation_h", ctypes.c_int),
("dilation_w", ctypes.c_int),
("mode", ctypes.c_int), # kCrossCorrelation: 0, kConvolution: 1
("split_k_slices", ctypes.c_int),
("groups", ctypes.c_int)
]
def __init__(self, problem_size) -> None:
for field_name, _ in self._fields_:
setattr(self, field_name, getattr(problem_size, field_name))
class Layout4D(ctypes.Structure):
_fields_ = [("stride", ctypes.c_int * 3)]
def __init__(self, tensor_ref):
stride = tensor_ref.stride()
setattr(self, "stride", (stride.at(0), stride.at(1), stride.at(2)))
class TensorRef_(ctypes.Structure):
_fields_ = [
("ptr", ctypes.c_void_p),
("layout", Layout4D)
]
def __init__(self, tensor_ref):
setattr(self, "ptr", tensor_ref.data())
setattr(self, "layout", Layout4D(tensor_ref.layout()))
class TensorRef2D_(ctypes.Structure):
_fields_ = [
("ptr", ctypes.c_void_p),
("stride", ctypes.c_int)
]
def get_conv2d_arguments(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _Conv2dArguments(ctypes.Structure):
_fields_ = [
("conv_kind", ctypes.c_int),
("problem_size", Conv2DProblemSize_),
("ptr_A", ctypes.c_void_p),
("ptr_B", ctypes.c_void_p),
("ptr_C", ctypes.c_void_p),
("ptr_D", ctypes.c_void_p),
("tensor_C_numel", ctypes.c_int),
("output_op", _EpilogueOutputOpParams),
("split_k_mode", ctypes.c_int)
]
return _Conv2dArguments, _EpilogueOutputOpParams
############################################################################################
# Reduction
############################################################################################
def get_reduction_params(epilogue_functor):
_EpilogueOutputParams = epilogue_functor.epilogue_type
class _ReductionParams(ctypes.Structure):
_fields_ = [
("problem_size", MatrixCoord_),
("partitions", ctypes.c_int),
("partition_stride", ctypes.c_longlong),
("workspace", TensorRef2D_),
("destination", TensorRef2D_),
("source", TensorRef2D_),
("output_op", _EpilogueOutputParams),
]
return _ReductionParams, _EpilogueOutputParams
###########################################################################################
# Epilogue Visitor Type Factory
###########################################################################################
class Empty(ctypes.Structure):
_fields_ = []
def __init__(self, *arg) -> None:
pass
class EmptyByte(ctypes.Structure):
_fields_ = [
("byte", ctypes.c_byte)
]
def __init__(self, *arg) -> None:
pass
class EBO:
def __init__(self, index: int, type) -> None:
self.index = index
self.type = type
def __eq__(self, other) -> bool:
if isinstance(other, EBO):
return self.index == other.index and self.type == other.type
return False
def __hash__(self) -> int:
return hash((self.index, self.type))
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self) -> str:
return f"<{self.index}, {self.type}>"
def tuple_factory_(input_tuple, dtype, constants=[0,1]):
"""
Factory function that generates a ctypes structure mirroring a cute::Tuple for the given input tuple
:param input_tuple: the input tuple
:type input_tuple: tuple
:param dtype: the data type for non-constant values
:type dtype: str, "int32_t", "int", "int64_t"
:param constants: the values that will be treated as compile-time constants
:type constants: list[int]
:return: ctype structure representing the cute::Tuple
:return: the empty base classes of the tuple
"""
# The empty base classes of the current tuple
empty_bases = []
# The first non empty base class
first_non_empty_base = None
# The ctype fields of the current tuple
ctype_fields = []
for idx, entry in enumerate(input_tuple):
# For nested tuples
if isinstance(entry, tuple):
sub_tuple_ctype, sub_empty_bases = tuple_factory_(entry, dtype, constants)
if ctypes.sizeof(sub_tuple_ctype) == 0:
# The empty tuple base class is also an empty EBO
empty_bases.append(EBO(idx, entry))
else:
if first_non_empty_base is None:
first_non_empty_base = sub_empty_bases
ctype_fields.append((f"entry_{idx}", sub_tuple_ctype))
else:
if entry in constants:
empty_bases.append(EBO(idx, entry))
ctype_fields.append((f"entry_{idx}", Empty))
else:
ctype_fields.append((f"entry_{idx}", dtype))
if first_non_empty_base is None:
first_non_empty_base = []
# Determine whether or not to add an additional byte for empty base classes
additional_byte = False
# Special case for constant tuple
if first_non_empty_base is None:
additional_byte = False
else:
for base in first_non_empty_base:
if base in empty_bases:
additional_byte = True
break
if additional_byte:
ctype_fields = [("empty_byte", EmptyByte), ] + ctype_fields
# Create the ctype tuple
class TupleType(ctypes.Structure):
_fields_ = ctype_fields
def __init__(self, args) -> None:
if additional_byte:
fields = self._fields_[1:]
else:
fields = self._fields_
assert len(fields) == len(args)
for field, arg in zip(fields, args):
name = field[0]
field_type = field[1]
setattr(self, name, field_type(arg))
return TupleType, empty_bases
def tuple_factory(input_tuple, dtype: str, constants=[0,1]):
"""
Factory function that generates a ctypes structure mirroring a cute::Tuple for the given input tuple
:param input_tuple: the input tuple
:type input_tuple: tuple
:param dtype: the data type for non-constant values
:type dtype: str, "int32_t", "int", "int64_t"
:param constants: the values that will be treated as compile-time constants
:type constants: list[int]
:return: ctypes structure representing the cute::Tuple
"""
# Step 1: convert the dtype
if dtype == "int64_t":
dtype = ctypes.c_longlong
elif dtype in ["int", "int32_t"]:
dtype = ctypes.c_int32
else:
raise NotImplementedError(f"Type {dtype} is not supported")
tuple_type, _ = tuple_factory_(input_tuple, dtype, constants)
if ctypes.sizeof(tuple_type) == 0:
return EmptyByte
return tuple_type
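# Illustrative usage (a sketch): encode a stride tuple such as (2048, 1, 0),
# where 1 and 0 are treated as compile-time constants and occupy no storage:
#
#   StrideType = tuple_factory((2048, 1, 0), "int64_t")
#   stride = StrideType((2048, 1, 0))
#   # Only the non-constant leading entry contributes to ctypes.sizeof(stride).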
def visitor_factory(node_types, node_names):
"""
Creates the argument type of epilogue visitor type
:param node_types: list of argument types under ctypes
:param node_names: list of argument names under str
:return: tuple type in ctypes.Structure
"""
ctypes_field = []
# A flat struct is used when the number of nodes is <= 4,
# because Sm90VisitorImplBase provides specializations for up to 4 nodes
# in `include/cutlass/epilogue/fusion/sm90_visitor_tma_warpspecialized.hpp`
if len(node_types) <= 4:
for idx, node_type in enumerate(node_types):
if ctypes.sizeof(node_type) == 0:
# Special case for empty struct
# 1 byte placeholder is used for correct alignment
ctypes_field.append((node_names[idx], ctypes.c_byte))
else:
ctypes_field.append((node_names[idx], node_type))
class VisitorType(ctypes.Structure):
_fields_ = ctypes_field
def __init__(self, kwargs) -> None:
for field in self._fields_:
fname, ftype = field
if ftype != ctypes.c_byte:
setattr(self, fname, ftype(kwargs))
# For cases with more than 4 nodes, tuple is used
else:
for idx, node_type in enumerate(node_types):
ctypes_field.append((node_names[idx], node_type))
class VisitorType(ctypes.Structure):
_fields_ = ctypes_field
def __init__(self, kwargs) -> None:
for field in self._fields_:
fname, ftype = field
setattr(self, fname, ftype(kwargs))
return VisitorType
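# Illustrative usage (a sketch; real node types come from the emitted epilogue
# visitor tree, so the names below are placeholders):
#
#   VisitorArgs = visitor_factory([Empty, ctypes.c_int64], ["compute_0", "alpha"])
#   # Empty nodes (sizeof == 0) become 1-byte placeholders; every other field is
#   # constructed from the same `kwargs` object passed to VisitorArgs(kwargs).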
| python/cutlass/backend/c_types.py/0 | {
"file_path": "python/cutlass/backend/c_types.py",
"repo_id": "python",
"token_count": 9749
} | 42 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for generating source for building a PyTorch CUDA extension that uses a CUTLASS kernel.
If specified, the extension can be JIT compiled via PyTorch's ``cpp_extension.load`` method.
Example usage with JIT compilation:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass_library.LayoutType.RowMajor)
op = plan.construct()
mod = cutlass.emit.pytorch(op, 'cutlass_gemm', 80, jit=True)
# Generate inputs for the GEMM
A, B, C = [torch.ones((512, 512)).to('cuda') for _ in range(3)]
# Run the module
D = mod.run(A, B, C)
Example usage without JIT compilation:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
op = plan.construct()
cutlass.emit.pytorch(op, 'cutlass_gemm', 80, jit=False, sourcedir='output')
After this call, the directory ``output`` contains ``setup.py``,
``cutlass_gemm.cpp``, and ``cutlass_gemm_kernel.cu``. The module can be built from
within ``output`` by running: ``TORCH_CUDA_ARCH_LIST="8.0" python setup.py develop --user``.
The module can later be used in Python via:
.. highlight:: python
.. code-block:: python
import torch
import cutlass_gemm
# Generate inputs for the GEMM
A, B, C = [torch.ones((512, 512)).to('cuda') for _ in range(3)]
# Run the module
D = cutlass_gemm.run(A, B, C)
"""
import logging
import os
from cutlass_library import ConvKind, ConvKindNames, DataType, SubstituteTemplate
from cutlass import CUTLASS_PATH, logger, swizzle
from cutlass.backend.gemm_operation import GemmOperationGrouped, GemmOperationUniversal
from cutlass.backend.conv2d_operation import Conv2dOperation
from cutlass.backend.library import ApiVersion
from cutlass.emit import common
from cutlass.utils.datatypes import is_torch_available
if is_torch_available():
import torch
_PYTORCH_CUDA_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <cuda_runtime.h>
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include "cutlass/cutlass.h"
#include "cutlass/util/device_memory.h"
// helper function allocating the memory
void* device_memory_allocation(size_t size, int device_id=0) {
if (size > 0) {
torch::Device device(torch::kCUDA, device_id);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
torch::TensorOptions options = torch::TensorOptions().dtype(torch::kI8).device(device);
at::Tensor device_tensor = torch::empty({(long)size,}, options);
return reinterpret_cast<void*>(device_tensor.data_ptr());
} else {
return nullptr;
}
}
${includes}
${declaration}
${impl}
"""
_PYTORCH_GEMM_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <pybind11/stl.h>
// CUDA forward declarations
at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt, float alpha=1.f, float beta=0.f);
// C++ interface
at::Tensor ${name}(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt, float alpha=1.f, float beta=0.f) {
return ${name}_kernel(A, B, C, alpha, beta);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("run", py::overload_cast<const at::Tensor&, const at::Tensor&, at::optional<const at::Tensor>, float, float>(&${name}), py::arg("A"), py::arg("B"), py::arg("C") = nullptr, py::arg("alpha") = 1.f, py::arg("beta") = 0.f);
}
"""
_PYTORCH_GROUPED_GEMM_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <pybind11/stl.h>
// CUDA forward declarations
std::vector<at::Tensor> ${name}_kernel(const std::vector<at::Tensor>& A, const std::vector<at::Tensor>& B, at::optional<const std::vector<at::Tensor>> C=at::nullopt, float alpha=1.f, float beta=0.f);
// C++ interface
std::vector<at::Tensor> ${name}(const std::vector<at::Tensor>& A, const std::vector<at::Tensor>& B, at::optional<const std::vector<at::Tensor>> C=at::nullopt, float alpha=1.f, float beta=0.f) {
return ${name}_kernel(A, B, C, alpha, beta);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("run", py::overload_cast<const std::vector<at::Tensor>&, const std::vector<at::Tensor>&, at::optional<const std::vector<at::Tensor>>, float, float>(&${name}),
py::arg("A"), py::arg("B"), py::arg("C") = nullptr, py::arg("alpha") = 1.f, py::arg("beta") = 0.f);
}
"""
_PYTORCH_CONV2D_FPROP_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <pybind11/stl.h>
// CUDA forward declarations
at::Tensor ${name}_kernel(
const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1);
// C++ interface
at::Tensor ${name}(
const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1) {
return ${name}_kernel(A, B, C, stride, padding, dilation, alpha, beta, split_k_mode, split_k_slices);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("run",
py::overload_cast<
const at::Tensor&, const at::Tensor&, at::optional<const at::Tensor>,
std::tuple<int, int>, std::tuple<int, int>, std::tuple<int, int>, float, float, std::string, int>(
&${name}), py::arg("A"), py::arg("B"), py::arg("C") = nullptr,
py::arg("stride") = std::make_tuple(1, 1), py::arg("padding") = std::make_tuple(1, 1), py::arg("dilation") = std::make_tuple(1, 1),
py::arg("alpha") = 1.f, py::arg("beta") = 0.f,
py::arg("split_k_mode") = "serial", py::arg("split_k_slices") = 1);
}
"""
_PYTORCH_CONV2D_GRAD_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <pybind11/stl.h>
// CUDA forward declarations
at::Tensor ${name}_kernel(
std::tuple<int, int, int, int> result_size, const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1);
// C++ interface
at::Tensor ${name}(
std::tuple<int, int, int, int> result_size, const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1) {
return ${name}_kernel(result_size, A, B, C, stride, padding, dilation, alpha, beta, split_k_mode, split_k_slices);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("run",
py::overload_cast<
std::tuple<int, int, int, int>, const at::Tensor&, const at::Tensor&, at::optional<const at::Tensor>,
std::tuple<int, int>, std::tuple<int, int>, std::tuple<int, int>, float, float, std::string, int>(
&${name}), py::arg("result_size"), py::arg("A"), py::arg("B"), py::arg("C") = nullptr,
py::arg("stride") = std::make_tuple(1, 1), py::arg("padding") = std::make_tuple(1, 1), py::arg("dilation") = std::make_tuple(1, 1),
py::arg("alpha") = 1.f, py::arg("beta") = 0.f,
py::arg("split_k_mode") = "serial", py::arg("split_k_slices") = 1);
}
"""
_PYTORCH_GEMM_INCLUDES = {
ApiVersion.v2x: """
#include "cutlass/gemm/device/gemm_universal.h"
""",
ApiVersion.v3x: """
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/util/packed_stride.hpp"
""",
}
_PYTORCH_GROUPED_GEMM_INCLUDES = """
#include "cutlass/gemm/kernel/default_gemm_grouped.h"
#include "cutlass/gemm/device/gemm_grouped.h"
"""
_PYTORCH_CONV2D_INCLUDES = """
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/kernel/default_conv2d_dgrad.h"
#include "cutlass/conv/kernel/default_conv2d_wgrad.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
"""
_CUTLASS_TYPE_TO_TORCH_TYPE = {
DataType.f16: "torch::kF16",
DataType.f32: "torch::kF32",
DataType.f64: "torch::kF64",
DataType.s8: "torch::kI8",
DataType.s32: "torch::kI32",
}
_PYTORCH_GEMM_IMPL_TEMPLATE_2x = (
common._CUTLASS_KERNEL_RUN_GEMM_2x
+ """
at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C, float alpha, float beta) {
int M = A.size(0);
int N = B.size(1);
int K = A.size(1);
typename DeviceKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename DeviceKernel::ElementC*>(C->contiguous().data_ptr());
at::Tensor D = B.new_empty({M, N}, ${torch_type_C});
cutlass::Status status = ${name}_kernel_run(M, N, K,
reinterpret_cast<typename DeviceKernel::ElementA*>(A.contiguous().data_ptr()),
reinterpret_cast<typename DeviceKernel::ElementB*>(B.contiguous().data_ptr()),
ptrC,
reinterpret_cast<typename DeviceKernel::ElementC*>(D.contiguous().data_ptr()),
ElementCompute(alpha), ElementCompute(beta));
TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
return D;
}
"""
)
_PYTORCH_GEMM_IMPL_TEMPLATE_3x = (
common._CUTLASS_KERNEL_RUN_GEMM_3x
+ """
bool hw_info_queried = false;
cutlass::KernelHardwareInfo hw_info;
at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C, float alpha, float beta) {
int M = A.size(0);
int N = B.size(1);
int K = A.size(1);
int L = 1;
// Query hardware info if we haven't already
if (!hw_info_queried) {
hw_info.device_id = 0;
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
hw_info_queried = true;
}
typename DeviceKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename DeviceKernel::ElementC*>(C->contiguous().data_ptr());
at::Tensor D = B.new_empty({M, N}, ${torch_type_C});
cutlass::Status status = ${name}_kernel_run(M, N, K, L,
reinterpret_cast<typename DeviceKernel::ElementA*>(A.contiguous().data_ptr()),
reinterpret_cast<typename DeviceKernel::ElementB*>(B.contiguous().data_ptr()),
ptrC,
reinterpret_cast<typename DeviceKernel::ElementC*>(D.contiguous().data_ptr()),
ElementCompute(alpha), ElementCompute(beta),
hw_info);
TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
return D;
}
"""
)
_PYTORCH_GROUPED_GEMM_IMPL_TEMPLATE = (
common._CUTLASS_KERNEL_RUN_GROUPED_GEMM_2x
+ """
std::vector<at::Tensor> ${name}_kernel(const std::vector<at::Tensor>& A, const std::vector<at::Tensor>& B, at::optional<const std::vector<at::Tensor>> C, float alpha, float beta) {
size_t num = A.size();
// To avoid performing many small cudaMallocs and host-to-device copies,
// we serialize the grouped GEMM arguments on the host, allocate one
// large chunk of device memory, and perform a single cudaMemcpy to
// copy the host data to the device. Allocation overheads could be
// avoided by using a memory pool.
// Calculate the total size of the data to be copied from host to device
size_t total_size = sizeof(cutlass::gemm::GemmCoord) +
sizeof(DeviceKernel::ElementA*) +
sizeof(DeviceKernel::ElementB*) +
sizeof(DeviceKernel::ElementC*) +
sizeof(DeviceKernel::ElementC*) +
sizeof(int64_t) +
sizeof(int64_t) +
sizeof(int64_t);
total_size *= num;
// num * sizeof(cutlass::gemm::GemmCoord) may leave one at a non-multiple
// of sizeof(DeviceKernel::ElementA*) (which will be 64 on a 64-bit system).
// To ensure that we don't end up having misaligned loads in the kernel,
// we pad to the nearest multiple of 8.
//
// Note that, even on a 32-bit system (for which sizeof(X*) will not equal
// sizeof(int64_t)), only padding between the list of GemmCoords and the
// list of ptr_As is sufficient because the set of four equal-length lists of pointers
// (A*, B*, C*, D*) will ensure that the first list of int64_ts will always
// start on a multiple of 8.
int64_t padding = 8 - (total_size % 8);
total_size += padding;
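// Worked example (illustrative, assuming 64-bit pointers): with num = 3, the
// GemmCoord list occupies 3 * 12 = 36 bytes while every later list consists of
// 8-byte entries, so total_size % 8 == 36 % 8 == 4 and padding == 4, which
// realigns the pointer lists that follow to an 8-byte boundary.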
uint8_t* host_data = new uint8_t[total_size];
cutlass::DeviceAllocation<uint8_t> device_data(total_size);
uint8_t* start = host_data;
cutlass::gemm::GemmCoord* problem_sizes_host = reinterpret_cast<cutlass::gemm::GemmCoord*>(start);
// Apply the padding after the list of GemmCoords
start += num * sizeof(cutlass::gemm::GemmCoord) + padding;
int64_t ptr_A_offset = start - host_data;
DeviceKernel::ElementA** ptr_A_host = reinterpret_cast<DeviceKernel::ElementA**>(start);
start += num * sizeof(DeviceKernel::ElementA*);
int64_t ptr_B_offset = start - host_data;
DeviceKernel::ElementB** ptr_B_host = reinterpret_cast<DeviceKernel::ElementB**>(start);
start += num * sizeof(DeviceKernel::ElementB*);
int64_t ptr_C_offset = start - host_data;
DeviceKernel::ElementC** ptr_C_host = reinterpret_cast<DeviceKernel::ElementC**>(start);
start += num * sizeof(DeviceKernel::ElementC*);
int64_t ptr_D_offset = start - host_data;
DeviceKernel::ElementC** ptr_D_host = reinterpret_cast<DeviceKernel::ElementC**>(start);
start += num * sizeof(DeviceKernel::ElementC*);
int64_t lda_offset = start - host_data;
int64_t* lda_host = reinterpret_cast<int64_t*>(start);
start += num * sizeof(int64_t);
int64_t ldb_offset = start - host_data;
int64_t* ldb_host = reinterpret_cast<int64_t*>(start);
start += num * sizeof(int64_t);
int64_t ldc_offset = start - host_data;
int64_t* ldc_host = reinterpret_cast<int64_t*>(start);
start += num * sizeof(int64_t);
std::vector<at::Tensor> D(num);
bool need_C = (C != at::nullopt) && (beta != 0.f);
for (size_t i = 0; i < num; ++i) {
int M = A[i].size(0);
int N = B[i].size(1);
int K = A[i].size(1);
*(problem_sizes_host + i) = {M, N, K};
*(ptr_A_host + i) = reinterpret_cast<typename DeviceKernel::ElementA*>(A[i].contiguous().data_ptr());
*(ptr_B_host + i) = reinterpret_cast<typename DeviceKernel::ElementB*>(B[i].contiguous().data_ptr());
if (need_C) {
*(ptr_C_host + i) = reinterpret_cast<typename DeviceKernel::ElementC*>(C->at(i).contiguous().data_ptr());
}
else {
*(ptr_C_host + i) = nullptr;
}
D[i] = B[i].new_empty({M, N}, ${torch_type_C});
*(ptr_D_host + i) = reinterpret_cast<typename DeviceKernel::ElementC*>(D[i].contiguous().data_ptr());
*(lda_host + i) = DeviceKernel::LayoutA::packed({M, K}).stride(0);
*(ldb_host + i) = DeviceKernel::LayoutB::packed({K, N}).stride(0);
*(ldc_host + i) = DeviceKernel::LayoutC::packed({M, N}).stride(0);
}
device_data.copy_from_host(host_data);
cutlass::Status status = ${name}_kernel_run(
num,
reinterpret_cast<cutlass::gemm::GemmCoord*>(device_data.get()),
reinterpret_cast<DeviceKernel::ElementA**>(device_data.get() + ptr_A_offset),
reinterpret_cast<DeviceKernel::ElementB**>(device_data.get() + ptr_B_offset),
reinterpret_cast<DeviceKernel::ElementC**>(device_data.get() + ptr_C_offset),
reinterpret_cast<DeviceKernel::ElementC**>(device_data.get() + ptr_D_offset),
reinterpret_cast<int64_t*>(device_data.get() + lda_offset),
reinterpret_cast<int64_t*>(device_data.get() + ldb_offset),
reinterpret_cast<int64_t*>(device_data.get() + ldc_offset),
reinterpret_cast<int64_t*>(device_data.get() + ldc_offset),
ElementCompute(alpha), ElementCompute(beta));
delete[] host_data;
TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
return D;
}
"""
)
_PYTORCH_CONV2D_IMPL_TEMPLATE_2x = """
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
cutlass::Status status = ${name}_kernel_run(
&problem_size,
reinterpret_cast<typename UnderlyingKernel::ElementA*>(A.data_ptr()),
reinterpret_cast<typename UnderlyingKernel::ElementB*>(B.data_ptr()),
ptrC,
reinterpret_cast<typename UnderlyingKernel::ElementC*>(D.data_ptr()),
alpha, beta,
split_k_mode, stream, B.device().index());
TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
return D;
}
"""
_PYTORCH_CONV2D_FPROP_IMPL_TEMPLATE_2x = (
common._CUTLASS_KERNEL_RUN_CONV2D_2x
+ """
at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f, std::string split_k_mode="serial", int split_k_slices=1) {
int N, H, W, C_, K, R, S, P, Q;
N = A.size(0);
C_ = A.size(1);
H = A.size(2);
W = A.size(3);
K = B.size(0);
R = B.size(2);
S = B.size(3);
cutlass::conv::Conv2dProblemSize problem_size(
cutlass::Tensor4DCoord(N, H, W, C_),
cutlass::Tensor4DCoord(K, R, S, C_),
cutlass::Tensor4DCoord(std::get<0>(padding), std::get<0>(padding), std::get<1>(padding), std::get<1>(padding)),
cutlass::MatrixCoord(std::get<0>(stride), std::get<1>(stride)),
cutlass::MatrixCoord(std::get<0>(dilation), std::get<1>(dilation)),
cutlass::conv::Mode::kCrossCorrelation,
split_k_slices
);
P = problem_size.P;
Q = problem_size.Q;
typename UnderlyingKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename UnderlyingKernel::ElementC*>(C->data_ptr());
torch::TensorOptions options = torch::TensorOptions().dtype(${torch_type_C}).device(B.device()).memory_format(at::MemoryFormat::ChannelsLast);
at::Tensor D = torch::zeros({N, K, P, Q}, options);
""" + _PYTORCH_CONV2D_IMPL_TEMPLATE_2x
)
_PYTORCH_CONV2D_DGRAD_IMPL_TEMPLATE_2x = (
common._CUTLASS_KERNEL_RUN_CONV2D_2x
+ """
at::Tensor ${name}_kernel(std::tuple<int, int, int, int> input_size, const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1}, float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1) {
int N, H, W, C_, K, R, S;
N = std::get<0>(input_size);
C_ = std::get<1>(input_size);
H = std::get<2>(input_size);
W = std::get<3>(input_size);
K = B.size(0);
R = B.size(2);
S = B.size(3);
cutlass::conv::Conv2dProblemSize problem_size(
cutlass::Tensor4DCoord(N, H, W, C_),
cutlass::Tensor4DCoord(K, R, S, C_),
cutlass::Tensor4DCoord(std::get<0>(padding), std::get<0>(padding), std::get<1>(padding), std::get<1>(padding)),
cutlass::MatrixCoord(std::get<0>(stride), std::get<1>(stride)),
cutlass::MatrixCoord(std::get<0>(dilation), std::get<1>(dilation)),
cutlass::conv::Mode::kCrossCorrelation,
split_k_slices
);
typename UnderlyingKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename UnderlyingKernel::ElementC*>(C->data_ptr());
torch::TensorOptions options = torch::TensorOptions().dtype(${torch_type_C}).device(B.device()).memory_format(at::MemoryFormat::ChannelsLast);
at::Tensor D = torch::empty({N, C_, H, W}, options);
""" + _PYTORCH_CONV2D_IMPL_TEMPLATE_2x
)
_PYTORCH_CONV2D_WGRAD_IMPL_TEMPLATE_2x = (
common._CUTLASS_KERNEL_RUN_CONV2D_2x
+ """
at::Tensor ${name}_kernel(std::tuple<int, int, int, int> weight_size, const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1}, float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1) {
int N, H, W, C_, K, R, S;
K = std::get<0>(weight_size);
C_ = std::get<1>(weight_size);
R = std::get<2>(weight_size);
S = std::get<3>(weight_size);
N = B.size(0);
H = B.size(2);
W = B.size(3);
cutlass::conv::Conv2dProblemSize problem_size(
cutlass::Tensor4DCoord(N, H, W, C_),
cutlass::Tensor4DCoord(K, R, S, C_),
cutlass::Tensor4DCoord(std::get<0>(padding), std::get<0>(padding), std::get<1>(padding), std::get<1>(padding)),
cutlass::MatrixCoord(std::get<0>(stride), std::get<1>(stride)),
cutlass::MatrixCoord(std::get<0>(dilation), std::get<1>(dilation)),
cutlass::conv::Mode::kCrossCorrelation,
split_k_slices
);
typename UnderlyingKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename UnderlyingKernel::ElementC*>(C->data_ptr());
torch::TensorOptions options = torch::TensorOptions().dtype(${torch_type_C}).device(B.device()).memory_format(at::MemoryFormat::ChannelsLast);
at::Tensor D = torch::empty({K, C_, R, S}, options);
""" + _PYTORCH_CONV2D_IMPL_TEMPLATE_2x
)
_PYTORCH_SETUP_PY = common._PYSTYLE_AUTOGEN_COMMENT + """
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='${name}',
ext_modules=[
CUDAExtension('${name}', [
'${name}.cpp',
'${name}_kernel.cu',
],
include_dirs=['${cutlass_path}/include', '${cutlass_path}/tools/util/include'],
extra_compile_args={
'cxx': ['-std=c++17'],
'nvcc': ['-std=c++17', ${extra_compile_args}],
},
libraries=['cuda']
),
],
cmdclass={
'build_ext': BuildExtension
})
"""
def _generate_setup(name: str, sourcedir: str, extra_compile_args: str=""):
"""
Generates a setup.py file for the extension
:param name: name of the module to generate
:type name: str
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
:param extra_compile_args: additional compiler arguments to embed in the generated setup.py
:type extra_compile_args: str
"""
setup_py_file = os.path.join(sourcedir, "setup.py")
setup_source = SubstituteTemplate(
_PYTORCH_SETUP_PY, {"name": name, "cutlass_path": CUTLASS_PATH, "extra_compile_args": extra_compile_args}
)
with open(setup_py_file, "w") as outfile:
outfile.write(setup_source)
class _ArchListSetter:
"""
Utility context manager for temporarily setting the value of the ``TORCH_CUDA_ARCH_LIST``
environment variable when building a PyTorch CUDA module.
``TORCH_CUDA_ARCH_LIST`` is a space-delimited list of compute capabilities for which a PyTorch
CUDA module should be compiled.
For example, ``TORCH_CUDA_ARCH_LIST="7.0 8.0"`` would result in the inclusion of
``-gencode=arch=compute_70,code=sm_70`` and ``-gencode=arch=compute_80,code=sm_80`` in the
compilation of the module.
This utility wraps the building of a PyTorch CUDA module with a setting of this environment
variable according to the compute capability currently being targeted.
Example usage:
.. highlight:: python
.. code-block:: python
# Temporarily set TORCH_CUDA_ARCH_LIST="8.0"
with _ArchListSetter(80):
# Perform JIT compilation and loading of the module
mod = torch.utils.cpp_extension.load(...)
:param cc: compute capability
:type cc: int
"""
_TORCH_CUDA_ARCH_LIST = "TORCH_CUDA_ARCH_LIST"
def __init__(self, cc: int):
self.cc_str = ".".join(list(str(cc)))
def __enter__(self):
"""
Saves the old value of TORCH_CUDA_ARCH_LIST and reset it to the new value based on ``cc``
"""
self.old_arch_list = os.getenv(_ArchListSetter._TORCH_CUDA_ARCH_LIST)
os.environ[_ArchListSetter._TORCH_CUDA_ARCH_LIST] = self.cc_str
return self
def __exit__(self, exc_type, exc_val, traceback):
"""
Restores the old value of TORCH_CUDA_ARCH_LIST
"""
if self.old_arch_list is None:
del os.environ[_ArchListSetter._TORCH_CUDA_ARCH_LIST]
else:
os.environ[_ArchListSetter._TORCH_CUDA_ARCH_LIST] = self.old_arch_list
def _jit(name: str, cc: int, cpp_file: str, cuda_file: str):
"""
JIT compiles and loads a PyTorch CUDA extension.
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param cpp_file: path to file containing extension's C++ interface
:type cpp_file: str
:param cuda_file: path to file containing extension's CUDA interface
:type cuda_file: str
:return: loaded PyTorch module
"""
from torch.utils.cpp_extension import load
extra_cuda_cflags = ["-std=c++17"]
if cc == 90:
# PyTorch does not currently add the sm_90a target when compute capability
# 9.0 is set within TORCH_CUDA_ARCH_LIST. Thus, we manually add the sm_90a target.
extra_cuda_cflags.append("-gencode=arch=compute_90a,code=sm_90a")
with _ArchListSetter(cc):
jitmodule = load(
name,
[cpp_file, cuda_file],
extra_cuda_cflags=extra_cuda_cflags,
extra_include_paths=[
os.path.join(CUTLASS_PATH, "include"),
os.path.join(CUTLASS_PATH, "tools/util/include"),
],
extra_ldflags=["-lcuda"],
verbose=(logger.level == logging.DEBUG)
)
return jitmodule
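# Illustrative call (a sketch; the module name and file paths are placeholders
# produced by the emitters below):
#
#   mod = _jit("cutlass_gemm", 80, "out/cutlass_gemm.cpp", "out/cutlass_gemm_kernel.cu")
#   D = mod.run(A, B, C)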
def _pytorch_gemm(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""):
"""
Generates source for building a PyTorch CUDA module that leverages the CUTLASS GEMM
specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
compiled, loaded, and returned.
:param op: operation to emit in the module
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param jit: whether the module should be just-in-time compiled
:type jit: bool
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
:return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise
"""
if sourcedir != "" and not os.path.isdir(sourcedir):
os.makedirs(sourcedir)
cuda_file = os.path.join(sourcedir, name + "_kernel.cu")
extra_kw = {}
if op.api == ApiVersion.v3x:
impl_template = _PYTORCH_GEMM_IMPL_TEMPLATE_3x
else:
impl_template = _PYTORCH_GEMM_IMPL_TEMPLATE_2x
if op.swizzling_functor == swizzle.ThreadblockSwizzleStreamK:
extra_kw["args"] = common._CUTLASS_KERNEL_ARGS_2x_STREAM_K
else:
extra_kw["args"] = common._CUTLASS_KERNEL_ARGS_2x
cuda_impl = SubstituteTemplate(impl_template, {"name": name, **extra_kw})
cuda_source = SubstituteTemplate(
_PYTORCH_CUDA_TEMPLATE,
{
"includes": _PYTORCH_GEMM_INCLUDES[op.api],
"declaration": op.rt_module.emit(),
"procedural_name": op.procedural_name(),
"impl": cuda_impl,
"torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element],
},
)
with open(cuda_file, "w") as outfile:
outfile.write(cuda_source)
cpp_file = os.path.join(sourcedir, name + ".cpp")
cpp_source = SubstituteTemplate(
_PYTORCH_GEMM_CPP_TEMPLATE,
{"name": name, "description": f"CUTLASS {op.procedural_name()} GEMM"},
)
with open(cpp_file, "w") as outfile:
outfile.write(cpp_source)
extra_compile_args = ""
if cc == 90:
extra_compile_args = "'--generate-code=arch=compute_90a,code=[sm_90a]'"
_generate_setup(name, sourcedir, extra_compile_args)
if jit:
return _jit(name, cc, cpp_file, cuda_file)
return None
def _pytorch_grouped_gemm(
op, name: str, cc: int, jit: bool = False, sourcedir: str = ""
):
"""
Generates source for building a PyTorch CUDA module that leverages the CUTLASS grouped GEMM
specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
compiled, loaded, and returned.
:param op: operation to emit in the module
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param jit: whether the module should be just-in-time compiled
:type jit: bool
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
:return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise
"""
if op.api != ApiVersion.v2x:
raise Exception("Grouped GEMM is currently only supported for CUTLASS 2.x")
if sourcedir != "" and not os.path.isdir(sourcedir):
os.makedirs(sourcedir)
cuda_file = os.path.join(sourcedir, name + "_kernel.cu")
cuda_impl = SubstituteTemplate(_PYTORCH_GROUPED_GEMM_IMPL_TEMPLATE, {"name": name})
cuda_source = SubstituteTemplate(
_PYTORCH_CUDA_TEMPLATE,
{
"includes": _PYTORCH_GROUPED_GEMM_INCLUDES,
"declaration": op.rt_module.emit(),
"procedural_name": op.procedural_name(),
"impl": cuda_impl,
"torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element],
},
)
with open(cuda_file, "w") as outfile:
outfile.write(cuda_source)
cpp_file = os.path.join(sourcedir, name + ".cpp")
cpp_source = SubstituteTemplate(
_PYTORCH_GROUPED_GEMM_CPP_TEMPLATE,
{"name": name, "description": f"CUTLASS {op.procedural_name()} grouped GEMM"},
)
with open(cpp_file, "w") as outfile:
outfile.write(cpp_source)
_generate_setup(name, sourcedir)
if jit:
return _jit(name, cc, cpp_file, cuda_file)
return None
def _pytorch_conv2d(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""):
"""
Generates source for building a PyTorch CUDA module that leverages the CUTLASS Conv2d
specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
compiled, loaded, and returned.
:param op: operation to emit in the module
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param jit: whether the module should be just-in-time compiled
:type jit: bool
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
Note that when the conv kind is ``dgrad`` or ``wgrad``, the size of the input ``(N, C, H, W)`` or
weight ``(K, C, R, S)`` must be provided, because multiple valid H/W/R/S values can map to the
same P/Q.
:return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise
"""
if sourcedir != "" and not os.path.isdir(sourcedir):
os.makedirs(sourcedir)
cuda_file = os.path.join(sourcedir, name + "_kernel.cu")
extra_kw = {}
if op.conv_kind == ConvKind.Fprop:
impl_template = _PYTORCH_CONV2D_FPROP_IMPL_TEMPLATE_2x
cpp_template = _PYTORCH_CONV2D_FPROP_CPP_TEMPLATE
elif op.conv_kind == ConvKind.Dgrad:
impl_template = _PYTORCH_CONV2D_DGRAD_IMPL_TEMPLATE_2x
cpp_template = _PYTORCH_CONV2D_GRAD_CPP_TEMPLATE
elif op.conv_kind == ConvKind.Wgrad:
impl_template = _PYTORCH_CONV2D_WGRAD_IMPL_TEMPLATE_2x
cpp_template = _PYTORCH_CONV2D_GRAD_CPP_TEMPLATE
extra_kw["conv_kind_name"] = ConvKindNames[op.conv_kind].capitalize()
extra_kw["torch_type_C"] = _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element]
cuda_impl = SubstituteTemplate(impl_template, {"name": name, **extra_kw})
cuda_source = SubstituteTemplate(
_PYTORCH_CUDA_TEMPLATE,
{
"includes": _PYTORCH_CONV2D_INCLUDES,
"declaration": op.rt_module.emit(),
"procedural_name": op.procedural_name(),
"impl": cuda_impl,
"torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element],
},
)
with open(cuda_file, "w") as outfile:
outfile.write(cuda_source)
cpp_file = os.path.join(sourcedir, name + ".cpp")
cpp_source = SubstituteTemplate(
cpp_template,
{"name": name, "description": f"CUTLASS {op.procedural_name()} Conv2d"},
)
with open(cpp_file, "w") as outfile:
outfile.write(cpp_source)
_generate_setup(name, sourcedir)
if jit:
return _jit(name, cc, cpp_file, cuda_file)
return None
def pytorch(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""):
"""
Generates source for building a PyTorch CUDA module that leverages the CUTLASS kernel
specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
compiled, loaded, and returned.
    This method emits files within ``sourcedir`` that can be used to build a
    PyTorch module (an illustrative usage sketch follows the function body).
:param op: operation to emit in the module
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param jit: whether the module should be just-in-time compiled
:type jit: bool
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
:return: loaded PyTorch module (if ``jit=True``) or None
"""
device_op = op.device_op()
if isinstance(op, GemmOperationUniversal):
return _pytorch_gemm(device_op, name, cc, jit, sourcedir)
elif isinstance(op, GemmOperationGrouped):
return _pytorch_grouped_gemm(device_op, name, cc, jit, sourcedir)
elif isinstance(op, Conv2dOperation):
return _pytorch_conv2d(device_op, name, cc, jit, sourcedir)
else:
raise Exception(
f"Operation type {type(op)} is not currently supported for PyTorch emission."
)
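# Illustrative usage sketch (documentation aid, not part of the emitter itself).
# It assumes the CUTLASS Python interface's ``cutlass.op.Gemm`` planner, its
# ``construct()`` method and ``cc`` attribute, and that the package re-exports
# this function as ``cutlass.emit.pytorch``:
#
#   import cutlass
#
#   plan = cutlass.op.Gemm(element=cutlass.DataType.f16,
#                          layout=cutlass.LayoutType.RowMajor)
#   op = plan.construct()
#   # Emits <name>.cpp, <name>_kernel.cu, and a build script in ``sourcedir``;
#   # with jit=True the extension is also compiled and loaded.
#   mod = cutlass.emit.pytorch(op, name="cutlass_gemm", cc=plan.cc,
#                              jit=True, sourcedir="out")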
| python/cutlass/emit/pytorch.py/0 | {
"file_path": "python/cutlass/emit/pytorch.py",
"repo_id": "python",
"token_count": 16302
} | 43 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import os
import sys
from . import conv2d_operation
from . import conv3d_operation
from . import gemm_operation
if '-m' not in sys.argv:
# Do not import generator when running python -m cutlass_library.generator to
# avoid double-import warnings
from . import generator
from . import library
from . import manifest
from . import rank_2k_operation
from . import rank_k_operation
from . import symm_operation
from . import trmm_operation
# Make enum types from library.py accessible via cutlass_library.*
from .library import *
# Set up `source` to point to the path containing the CUTLASS source.
# Check first if the path contains a `source` subdirectory -- this will
# be the case when the package has been installed via pip. Otherwise,
# default to the root of CUTLASS.
install_source_path = os.path.join(__path__[0], 'source')
if os.path.isdir(install_source_path):
source_path = install_source_path
else:
source_path = os.path.join(__path__[0], '../..')
| python/cutlass_library/__init__.py/0 | {
"file_path": "python/cutlass_library/__init__.py",
"repo_id": "python",
"token_count": 757
} | 44 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Methods for layout swizzling
"""
from .layout import *
def shiftr(a, s):
return a >> s if s > 0 else shiftl(a, -s)
def shiftl(a, s):
return a << s if s > 0 else shiftr(a, -s)
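# Descriptive note: a negative shift amount delegates to the opposite direction,
# e.g. shiftr(0b100, 1) == 0b10 while shiftr(0b100, -1) == 0b1000.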
## A generic Swizzle functor
# 0bxxxxxxxxxxxxxxxYYYxxxxxxxZZZxxxx
#                               ^--^  Base is the number of least-sig bits to keep constant
#                  ^-^       ^-^      Bits is the number of bits in the mask
#                    ^---------^      Shift is the distance to shift the YYY mask
#                                     (pos shifts YYY to the right, neg shifts YYY to the left)
#
# e.g. Given
# 0bxxxxxxxxxxxxxxxxYYxxxxxxxxxZZxxx
# the result is
# 0bxxxxxxxxxxxxxxxxYYxxxxxxxxxAAxxx where AA = ZZ xor YY
#
class Swizzle:
def __init__(self, bits, base, shift):
assert bits >= 0
assert base >= 0
assert abs(shift) >= bits
self.bits = bits
self.base = base
self.shift = shift
bit_msk = (1 << bits) - 1
self.yyy_msk = bit_msk << (base + max(0,shift))
self.zzz_msk = bit_msk << (base - min(0,shift))
# operator () (transform integer)
def __call__(self, offset):
return offset ^ shiftr(offset & self.yyy_msk, self.shift)
# Size of the domain
def size(self):
        return 1 << (self.bits + self.base + abs(self.shift))
# Size of the codomain
def cosize(self):
return self.size()
# print and str
def __str__(self):
return f"SW_{self.bits}_{self.base}_{self.shift}"
# error msgs and representation
def __repr__(self):
return f"Swizzle({self.bits},{self.base},{self.shift})"
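# A minimal worked example (illustrative, not part of the original module):
# Swizzle(2, 0, 3) keeps no base bits, uses 2-bit masks, and shifts the YY mask
# right by 3 so it lands on the ZZ bits.
#
#   sw = Swizzle(2, 0, 3)           # yyy_msk = 0b11000, zzz_msk = 0b00011
#   assert sw(0b11010) == 0b11001   # ZZ = 0b10 becomes 0b10 ^ 0b11 = 0b01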
class ComposedLayout(LayoutBase):
def __init__(self, layoutB, offset, layoutA):
self.layoutB = layoutB
self.offset = offset
self.layoutA = layoutA
# operator ==
def __eq__(self, other):
return self.layoutB == other.layoutB and self.offset == other.offset and self.layoutA == other.layoutA
# operator len(L) (len [rank] like tuples)
def __len__(self):
return len(self.layoutA)
# operator () (map coord to idx)
def __call__(self, *args):
return self.layoutB(self.offset + self.layoutA(*args))
# operator [] (get-i like tuples)
def __getitem__(self, i):
return ComposedLayout(self.layoutB, self.offset, self.layoutA[i])
# size(layout) Size of the domain
def size(self):
return size(self.layoutA)
# cosize(layout) Size of the codomain
def cosize(self):
return cosize(self.layoutB)
# print and str
def __str__(self):
return f"{self.layoutB} o {self.offset} o {self.layoutA}"
# error msgs and representation
def __repr__(self):
return f"ComposedLayout({repr(self.layoutB)},{repr(self.offset)},{repr(self.layoutA)})"
| python/pycute/swizzle.py/0 | {
"file_path": "python/pycute/swizzle.py",
"repo_id": "python",
"token_count": 1533
} | 45 |
################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Unit test for store nodes on SM80 and SM90
"""
import logging
import unittest
import cutlass
from cutlass.backend import *
from cutlass.epilogue import *
from utils.evt_testbed import EVTTestBed, EVTTestCaseBase
cutlass.set_log_level(logging.WARNING)
@unittest.skipIf(device_cc() not in [80, 86, 89, 90], "This unittest is only supported on CC [80, 86, 89, 90]")
class TestEVTStore(EVTTestCaseBase):
def test_aux_store(self):
"""
Returning a tensor with shape [m, n]
"""
def evt_aux_store(accum, alpha, C):
F = alpha * accum
D = F + C
return D, F
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 0.5,
"C": self.fake_tensor(self.element, (l, m, n)),
"F": self.fake_tensor(self.element, (l, m, n)),
"D": self.fake_tensor(self.element, (l, m, n)),
}
launcher = EVTTestBed(self.element, evt_aux_store, example_inputs)
input_keys = ["C", "alpha"]
result_keys = ["D", "F"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_col_reduce(self):
"""
Reduction [m, n] -> [m, 1]
"""
def evt_row_reduce(accum, alpha, C):
acc_row_max = max(accum, dim=[2,])
F = alpha * accum
F_row_max = max(F, dim=[0, 2])
D = F + C
return D, F_row_max, acc_row_max
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 2.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"F_row_max": self.fake_tensor(np.float32, (m, 1)),
"acc_row_max": self.fake_tensor(np.float32, (l, m, 1)),
"D": self.fake_tensor(self.element, (l, m, n)),
}
launcher = EVTTestBed(self.element, evt_row_reduce, example_inputs)
input_keys = ["C", "alpha"]
result_keys = ["D", "F_row_max", "acc_row_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_row_reduce(self):
"""
Reduction [m, n] -> [n]
"""
def evt_col_reduce(accum, alpha, C):
acc_col_max = max(accum, dim=[1,])
F = alpha * accum
F_col_max = max(F, dim=[0, 1])
D = F + C
return D, F_col_max, acc_col_max
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 2.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"F_col_max": self.fake_tensor(np.float32, (n,)),
"acc_col_max": self.fake_tensor(np.float32, (l, 1, n)),
"D": self.fake_tensor(self.element, (l, m, n)),
}
launcher = EVTTestBed(self.element, evt_col_reduce, example_inputs)
input_keys = ["C", "alpha"]
result_keys = ["D", "F_col_max", "acc_col_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
def test_scalar_reduce(self):
"""
Reduction [m, n] -> [1,]
"""
def evt_scalar_reduce(accum, alpha, C):
acc_max = max(accum, dim=[1, 2])
F = alpha * accum
F_max = max(F, dim=[0, 1, 2])
D = F + C
return D, F_max, acc_max
for m, n, k, l in self.get_problem_sizes(8):
example_inputs = {
"accum": self.fake_tensor(self.element, (l, m, n)),
"alpha": 2.0,
"C": self.fake_tensor(self.element, (l, m, n)),
"acc_max": self.fake_tensor(np.float32, (l, 1, 1)),
"F_max": self.fake_tensor(np.float32, (1,)),
"D": self.fake_tensor(self.element, (l, m, n)),
}
launcher = EVTTestBed(self.element, evt_scalar_reduce, example_inputs)
input_keys = ["C", "alpha"]
result_keys = ["D", "F_max", "acc_max"]
launcher.verify((m, n, k), input_keys, result_keys, l)
if __name__ == '__main__':
unittest.main()
| test/python/cutlass/evt/evt_store_sm80_90.py/0 | {
"file_path": "test/python/cutlass/evt/evt_store_sm80_90.py",
"repo_id": "test",
"token_count": 2849
} | 46 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Unit test for the launch_kernel_on_cluster function
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/cluster_launch.hpp"
#include "cute/arch/cluster_sm90.hpp"
#include <cassert>
#include <memory>
#include <type_traits>
#if defined(CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED)
namespace { // (anonymous)
// Using a struct instead of a lambda makes it possible
// to name the deleter type without std::function
// (which type-erases).
struct scalar_deleter {
void operator() (float* p) {
if (p != nullptr) {
cudaFree(p);
}
}
};
using scalar_device_pointer = std::unique_ptr<float, scalar_deleter>;
// Each test needs to initialize this anew,
// from a scalar instance that is in scope during the test.
__device__ float* scalar_ptr_gpu;
// A single scalar value on device.
// The constructor allocates space on device for one value,
// copies the value to device, and sets the global pointer
// `scalar_ptr_gpu` (see above) to point to it.
// sync_to_host() copies that value back to host.
//
// This class exists only for the tests in this file.
// In order to know whether a kernel that launch_on_cluster
// claimed to launch actually got launched, each kernel
// performs a side effect: it modifies the scalar value
// through the scalar_ptr_gpu pointer.
// It performs a side effect through a global,
// rather than through an argument,
// so that we can test kernel launch
// with kernels that take zero parameters.
class scalar {
private:
static constexpr std::size_t num_bytes = sizeof(float);
public:
scalar(float value) : value_host_(value)
{
float* ptr_gpu_raw = nullptr;
auto err = cudaMalloc(&ptr_gpu_raw, num_bytes);
assert(err == cudaSuccess);
scalar_device_pointer ptr_gpu{ptr_gpu_raw, scalar_deleter{}};
err = cudaMemcpy(ptr_gpu.get(), &value_host_,
num_bytes, cudaMemcpyHostToDevice);
assert(err == cudaSuccess);
ptr_gpu_ = std::move(ptr_gpu);
upload_device_pointer();
}
float sync_to_host()
{
auto err = cudaMemcpy(&value_host_, ptr_gpu_.get(),
num_bytes, cudaMemcpyDeviceToHost);
assert(err == cudaSuccess);
return value_host_;
}
private:
void upload_device_pointer()
{
float* ptr_raw = ptr_gpu_.get();
auto err = cudaMemcpyToSymbol(scalar_ptr_gpu, &ptr_raw, sizeof(float*));
assert(err == cudaSuccess);
}
float value_host_ = 0.0;
scalar_device_pointer ptr_gpu_;
};
template<int cluster_x, int cluster_y, int cluster_z>
CUTE_DEVICE void check_cluster_shape() {
[[maybe_unused]] const dim3 cluster_shape = cute::cluster_shape();
assert(cluster_shape.x == cluster_x);
assert(cluster_shape.y == cluster_y);
assert(cluster_shape.z == cluster_z);
}
template<int cluster_x, int cluster_y, int cluster_z>
__global__ void kernel_0()
{
check_cluster_shape<cluster_x, cluster_y, cluster_z>();
// Write to global memory, so that we know
// whether the kernel actually ran.
const dim3 block_id = cute::block_id_in_cluster();
if (threadIdx.x == 0 && block_id.x == 0 && block_id.y == 0 && block_id.z == 0) {
*scalar_ptr_gpu = 0.1f;
}
}
template<int cluster_x, int cluster_y, int cluster_z,
int expected_p0>
__global__ void kernel_1(int p0)
{
check_cluster_shape<cluster_x, cluster_y, cluster_z>();
assert(p0 == expected_p0);
// Write to global memory, so that we know
// whether the kernel actually ran.
const dim3 block_id = cute::block_id_in_cluster();
if (threadIdx.x == 0 && block_id.x == 0 && block_id.y == 0 && block_id.z == 0) {
*scalar_ptr_gpu = 1.2f;
}
}
template<int cluster_x, int cluster_y, int cluster_z,
int expected_p0,
int expected_p2>
__global__ void kernel_2(int p0, void* p1, int p2)
{
check_cluster_shape<cluster_x, cluster_y, cluster_z>();
assert(p0 == expected_p0);
assert(p1 == nullptr);
assert(p2 == expected_p2);
// Write to global memory, so that we know
// whether the kernel actually ran.
const dim3 block_id = cute::block_id_in_cluster();
if (threadIdx.x == 0 && block_id.x == 0 && block_id.y == 0 && block_id.z == 0) {
*scalar_ptr_gpu = 2.3f;
}
}
struct OverloadedOperatorAmpersand {
struct tag_t {};
// Test that kernel launch uses the actual address,
// instead of any overloaded operator& that might exist.
CUTE_HOST_DEVICE tag_t operator& () const {
return {};
}
int x = 0;
int y = 0;
int z = 0;
int w = 0;
};
static_assert(sizeof(OverloadedOperatorAmpersand) == 4 * sizeof(int));
template<int cluster_x, int cluster_y, int cluster_z,
int expected_p0,
int expected_p1_x,
int expected_p1_y,
int expected_p1_z,
int expected_p1_w,
std::uint64_t expected_p2>
__global__ void kernel_3(int p0, OverloadedOperatorAmpersand p1, std::uint64_t p2)
{
check_cluster_shape<cluster_x, cluster_y, cluster_z>();
assert(p0 == expected_p0);
assert(p1.x == expected_p1_x);
assert(p1.y == expected_p1_y);
assert(p1.z == expected_p1_z);
assert(p1.w == expected_p1_w);
assert(p2 == expected_p2);
// Write to global memory, so that we know
// whether the kernel actually ran.
const dim3 block_id = cute::block_id_in_cluster();
if (threadIdx.x == 0 && block_id.x == 0 && block_id.y == 0 && block_id.z == 0) {
*scalar_ptr_gpu = 3.4f;
}
}
} // namespace (anonymous)
TEST(SM90_ClusterLaunch, Kernel_0)
{
scalar global_value(-1.0f);
const dim3 grid_dims{2, 1, 1};
const dim3 block_dims{1, 1, 1};
const dim3 cluster_dims{grid_dims.x * block_dims.x, 1, 1};
const int smem_size_in_bytes = 0;
cutlass::ClusterLaunchParams params{
grid_dims, block_dims, cluster_dims, smem_size_in_bytes};
void const* kernel_ptr = reinterpret_cast<void const*>(&kernel_0<2, 1, 1>);
cutlass::Status status = cutlass::launch_kernel_on_cluster(params,
kernel_ptr);
ASSERT_EQ(status, cutlass::Status::kSuccess);
cudaError_t result = cudaDeviceSynchronize();
if (result == cudaSuccess) {
CUTLASS_TRACE_HOST("Kernel launch succeeded\n");
}
else {
CUTLASS_TRACE_HOST("Kernel launch FAILED\n");
cudaError_t error = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << "Error at kernel sync: "
<< cudaGetErrorString(error) << "\n";
}
ASSERT_EQ(global_value.sync_to_host(), 0.1f);
}
TEST(SM90_ClusterLaunch, Kernel_1)
{
scalar global_value(-1.0f);
const dim3 grid_dims{2, 1, 1};
const dim3 block_dims{1, 1, 1};
const dim3 cluster_dims{grid_dims.x * block_dims.x, 1, 1};
const int smem_size_in_bytes = 0;
cutlass::ClusterLaunchParams params{
grid_dims, block_dims, cluster_dims, smem_size_in_bytes};
constexpr int expected_p0 = 42;
void const* kernel_ptr = reinterpret_cast<void const*>(&kernel_1<2, 1, 1, expected_p0>);
const int p0 = expected_p0;
cutlass::Status status = cutlass::launch_kernel_on_cluster(params,
kernel_ptr, p0);
ASSERT_EQ(status, cutlass::Status::kSuccess);
cudaError_t result = cudaDeviceSynchronize();
if (result == cudaSuccess) {
#if (CUTLASS_DEBUG_TRACE_LEVEL > 1)
CUTLASS_TRACE_HOST("Kernel launch succeeded\n");
#endif
}
else {
CUTLASS_TRACE_HOST("Kernel launch FAILED\n");
cudaError_t error = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << "Error at kernel sync: "
<< cudaGetErrorString(error) << "\n";
}
ASSERT_EQ(global_value.sync_to_host(), 1.2f);
}
TEST(SM90_ClusterLaunch, Kernel_2)
{
scalar global_value(-1.0f);
const dim3 grid_dims{2, 1, 1};
const dim3 block_dims{1, 1, 1};
const dim3 cluster_dims{grid_dims.x * block_dims.x, 1, 1};
const int smem_size_in_bytes = 0;
cutlass::ClusterLaunchParams params{
grid_dims, block_dims, cluster_dims, smem_size_in_bytes};
constexpr int expected_p0 = 42;
constexpr int expected_p2 = 43;
int p0 = expected_p0;
int* p1 = nullptr;
int p2 = expected_p2;
void const* kernel_ptr = reinterpret_cast<void const*>(
&kernel_2<2, 1, 1, expected_p0, expected_p2>);
cutlass::Status status = cutlass::launch_kernel_on_cluster(params,
kernel_ptr, p0, p1, p2);
ASSERT_EQ(status, cutlass::Status::kSuccess);
cudaError_t result = cudaDeviceSynchronize();
if (result == cudaSuccess) {
#if (CUTLASS_DEBUG_TRACE_LEVEL > 1)
CUTLASS_TRACE_HOST("Kernel launch succeeded\n");
#endif
}
else {
CUTLASS_TRACE_HOST("Kernel launch FAILED\n");
cudaError_t error = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << "Error at kernel sync: "
<< cudaGetErrorString(error) << "\n";
}
ASSERT_EQ(global_value.sync_to_host(), 2.3f);
}
TEST(SM90_ClusterLaunch, Kernel_3)
{
scalar global_value(-1.0f);
const dim3 grid_dims{2, 1, 1};
const dim3 block_dims{1, 1, 1};
const dim3 cluster_dims{grid_dims.x * block_dims.x, 1, 1};
const int smem_size_in_bytes = 0;
cutlass::ClusterLaunchParams params{
grid_dims, block_dims, cluster_dims, smem_size_in_bytes};
constexpr int expected_p0 = 42;
constexpr int expected_p1_x = 1;
constexpr int expected_p1_y = 2;
constexpr int expected_p1_z = 3;
constexpr int expected_p1_w = 4;
constexpr std::uint64_t expected_p2 = 1'000'000'000'000uLL;
int p0 = expected_p0;
OverloadedOperatorAmpersand p1{expected_p1_x,
expected_p1_y, expected_p1_z, expected_p1_w};
// Verify that operator& is overloaded for this type.
static_assert(! std::is_same_v<decltype(&p1),
OverloadedOperatorAmpersand*>);
std::uint64_t p2 = expected_p2;
void const* kernel_ptr = reinterpret_cast<void const*>(
&kernel_3<2, 1, 1, expected_p0, expected_p1_x,
expected_p1_y, expected_p1_z, expected_p1_w,
expected_p2>);
cutlass::Status status = cutlass::launch_kernel_on_cluster(params,
kernel_ptr, p0, p1, p2);
ASSERT_EQ(status, cutlass::Status::kSuccess);
cudaError_t result = cudaDeviceSynchronize();
if (result == cudaSuccess) {
#if (CUTLASS_DEBUG_TRACE_LEVEL > 1)
CUTLASS_TRACE_HOST("Kernel launch succeeded\n");
#endif
}
else {
CUTLASS_TRACE_HOST("Kernel launch FAILED\n");
cudaError_t error = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << "Error at kernel sync: "
<< cudaGetErrorString(error) << "\n";
}
ASSERT_EQ(global_value.sync_to_host(), 3.4f);
}
#endif // CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED
| test/unit/cluster_launch/cluster_launch.cu/0 | {
"file_path": "test/unit/cluster_launch/cluster_launch.cu",
"repo_id": "test",
"token_count": 4594
} | 47 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <cute/stride.hpp>
TEST(CuTe_core, CompactColMajor_Static)
{
using namespace cute;
CUTE_STATIC_ASSERT_V((compact_col_major(Int<1>{}) == Int<0>{}));
CUTE_STATIC_ASSERT_V((compact_col_major(Int<1>{}, Int<3>{}) == Int<0>{}));
CUTE_STATIC_ASSERT_V((compact_col_major(Int<8>{}) == Int<1>{}));
CUTE_STATIC_ASSERT_V((compact_col_major(Int<8>{}, Int<3>{}) == Int<3>{}));
CUTE_STATIC_ASSERT_V((compact_col_major(1) == Int<1>{}));
CUTE_STATIC_ASSERT_V((compact_col_major(8) == Int<1>{}));
{
auto test = make_tuple(Int<4>{}, Int<8>{});
auto result = make_tuple(Int<1>{}, Int<4>{});
CUTE_STATIC_ASSERT_V((compact_col_major(test) == result));
}
{
auto test = make_tuple(Int<4>{}, Int<8>{}, Int< 2>{});
auto result = make_tuple(Int<1>{}, Int<4>{}, Int<32>{});
CUTE_STATIC_ASSERT_V((compact_col_major(test) == result));
}
{
auto test = make_tuple(Int<4>{}, Int<8>{}, Int<1>{}, Int< 2>{});
auto result = make_tuple(Int<1>{}, Int<4>{}, Int<0>{}, Int<32>{});
CUTE_STATIC_ASSERT_V((compact_col_major(test) == result));
}
{
auto test = make_tuple(make_tuple(Int<4>{}, Int<8>{}), Int<1>{}, Int< 2>{});
auto result = make_tuple(make_tuple(Int<1>{}, Int<4>{}), Int<0>{}, Int<32>{});
CUTE_STATIC_ASSERT_V((compact_col_major(test) == result));
}
{
auto test = make_tuple(Int<4>{}, make_tuple(Int<8>{}, Int<1>{}, Int< 2>{}));
auto result = make_tuple(Int<1>{}, make_tuple(Int<4>{}, Int<0>{}, Int<32>{}));
CUTE_STATIC_ASSERT_V((compact_col_major(test) == result));
}
{
auto test = make_tuple(Int<4>{}, make_tuple(Int<8>{}, Int<1>{}, make_tuple(Int< 2>{}, Int< 3>{})));
auto result = make_tuple(Int<1>{}, make_tuple(Int<4>{}, Int<0>{}, make_tuple(Int<32>{}, Int<64>{})));
CUTE_STATIC_ASSERT_V((compact_col_major(test) == result));
}
}
TEST(CuTe_core, CompactColMajor_Dynamic)
{
using namespace cute;
ASSERT_TRUE((compact_col_major(1) == 1));
ASSERT_TRUE((compact_col_major(1, 3) == 3));
ASSERT_TRUE((compact_col_major(8) == 1));
ASSERT_TRUE((compact_col_major(8, 3) == 3));
ASSERT_TRUE((compact_col_major(1) == 1));
ASSERT_TRUE((compact_col_major(8) == 1));
{
auto test = make_tuple(4, 8);
auto result = make_tuple(1, 4);
ASSERT_TRUE((compact_col_major(test) == result));
}
{
auto test = make_tuple(4, 8, 2);
auto result = make_tuple(1, 4, 32);
ASSERT_TRUE((compact_col_major(test) == result));
}
{
auto test = make_tuple(4, 8, 1, 2);
auto result = make_tuple(1, 4, 32, 32);
ASSERT_TRUE((compact_col_major(test) == result));
}
{
auto test = make_tuple(make_tuple(4, 8), 1, 2);
auto result = make_tuple(make_tuple(1, 4), 32, 32);
ASSERT_TRUE((compact_col_major(test) == result));
}
{
auto test = make_tuple(4, make_tuple(8, 1, 2));
auto result = make_tuple(1, make_tuple(4, 32, 32));
ASSERT_TRUE((compact_col_major(test) == result));
}
{
auto test = make_tuple(4, make_tuple(8, 1, make_tuple( 2, 3)));
auto result = make_tuple(1, make_tuple(4, 32, make_tuple(32, 64)));
ASSERT_TRUE((compact_col_major(test) == result));
}
}
TEST(CuTe_core, CompactRowMajor_Static)
{
using namespace cute;
CUTE_STATIC_ASSERT_V((compact_row_major(Int<1>{}) == Int<0>{}));
CUTE_STATIC_ASSERT_V((compact_row_major(Int<1>{}, Int<3>{}) == Int<0>{}));
CUTE_STATIC_ASSERT_V((compact_row_major(Int<8>{}) == Int<1>{}));
CUTE_STATIC_ASSERT_V((compact_row_major(Int<8>{}, Int<3>{}) == Int<3>{}));
CUTE_STATIC_ASSERT_V((compact_row_major(1) == Int<1>{}));
CUTE_STATIC_ASSERT_V((compact_row_major(8) == Int<1>{}));
{
auto test = make_tuple(Int<4>{}, Int<8>{});
auto result = make_tuple(Int<8>{}, Int<1>{});
CUTE_STATIC_ASSERT_V((compact_row_major(test) == result));
}
{
auto test = make_tuple(Int< 4>{}, Int<8>{}, Int<2>{});
auto result = make_tuple(Int<16>{}, Int<2>{}, Int<1>{});
CUTE_STATIC_ASSERT_V((compact_row_major(test) == result));
}
{
auto test = make_tuple(Int< 4>{}, Int<8>{}, Int<1>{}, Int<2>{});
auto result = make_tuple(Int<16>{}, Int<2>{}, Int<0>{}, Int<1>{});
CUTE_STATIC_ASSERT_V((compact_row_major(test) == result));
}
{
auto test = make_tuple(make_tuple(Int< 4>{}, Int<8>{}), Int<1>{}, Int<2>{});
auto result = make_tuple(make_tuple(Int<16>{}, Int<2>{}), Int<0>{}, Int<1>{});
CUTE_STATIC_ASSERT_V((compact_row_major(test) == result));
}
{
auto test = make_tuple(Int< 4>{}, make_tuple(Int<8>{}, Int<1>{}, Int<2>{}));
auto result = make_tuple(Int<16>{}, make_tuple(Int<2>{}, Int<0>{}, Int<1>{}));
CUTE_STATIC_ASSERT_V((compact_row_major(test) == result));
}
{
auto test = make_tuple(Int< 4>{}, make_tuple(Int<8>{}, Int<1>{}, make_tuple(Int<2>{}, Int<3>{})));
auto result = make_tuple(Int<48>{}, make_tuple(Int<6>{}, Int<0>{}, make_tuple(Int<3>{}, Int<1>{})));
CUTE_STATIC_ASSERT_V((compact_row_major(test) == result));
}
}
TEST(CuTe_core, CompactRowMajor_Dynamic)
{
using namespace cute;
ASSERT_TRUE((compact_row_major(1) == 1));
ASSERT_TRUE((compact_row_major(1, 3) == 3));
ASSERT_TRUE((compact_row_major(8) == 1));
ASSERT_TRUE((compact_row_major(8, 3) == 3));
ASSERT_TRUE((compact_row_major(1) == 1));
ASSERT_TRUE((compact_row_major(8) == 1));
{
auto test = make_tuple(4, 8);
auto result = make_tuple(8, 1);
ASSERT_TRUE((compact_row_major(test) == result));
}
{
auto test = make_tuple( 4, 8, 2);
auto result = make_tuple(16, 2, 1);
ASSERT_TRUE((compact_row_major(test) == result));
}
{
auto test = make_tuple( 4, 8, 1, 2);
auto result = make_tuple(16, 2, 2, 1);
ASSERT_TRUE((compact_row_major(test) == result));
}
{
auto test = make_tuple(make_tuple( 4, 8), 1, 2);
auto result = make_tuple(make_tuple(16, 2), 2, 1);
ASSERT_TRUE((compact_row_major(test) == result));
}
{
auto test = make_tuple( 4, make_tuple(8, 1, 2));
auto result = make_tuple(16, make_tuple(2, 2, 1));
ASSERT_TRUE((compact_row_major(test) == result));
}
{
auto test = make_tuple( 4, make_tuple(8, 1, make_tuple(2, 3)));
auto result = make_tuple(48, make_tuple(6, 6, make_tuple(3, 1)));
ASSERT_TRUE((compact_row_major(test) == result));
}
}
| test/unit/cute/core/compact_xmajor.cpp/0 | {
"file_path": "test/unit/cute/core/compact_xmajor.cpp",
"repo_id": "test",
"token_count": 3509
} | 48 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <cute/pointer.hpp>
TEST(CuTe_core, Pointer)
{
using namespace cute;
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("CuTe pointer wrappers");
CUTLASS_TRACE_HOST("-------------------------------");
// Test T* overloads (T can be nonconst or const)
{
using T = float;
using expected_type = cute::gmem_ptr<T*>;
T* p = nullptr;
// explicit template argument
auto gmem_p0 = cute::make_gmem_ptr<T>(p);
static_assert(cute::is_same_v<decltype(gmem_p0), expected_type>);
// deduced template argument
auto gmem_p1 = cute::make_gmem_ptr(p);
static_assert(cute::is_same_v<decltype(gmem_p1), expected_type>);
}
{
using T = float const;
using expected_type = cute::gmem_ptr<T*>;
T* p = nullptr;
// explicit template argument
auto gmem_p0 = cute::make_gmem_ptr<T>(p);
static_assert(cute::is_same_v<decltype(gmem_p0), expected_type>);
// deduced template argument
auto gmem_p1 = cute::make_gmem_ptr(p);
static_assert(cute::is_same_v<decltype(gmem_p1), expected_type>);
}
// Test void* and void const* overloads
// (these require an explicit template argument)
{
using T = float;
using expected_type = cute::gmem_ptr<T*>;
void* p = nullptr;
auto gmem_p0 = cute::make_gmem_ptr<T>(p);
static_assert(cute::is_same_v<decltype(gmem_p0), expected_type>);
}
{
using T = float const;
using expected_type = cute::gmem_ptr<T*>;
void const* p = nullptr;
auto gmem_p0 = cute::make_gmem_ptr<T>(p);
static_assert(cute::is_same_v<decltype(gmem_p0), expected_type>);
}
// Test nullptr_t overload.
{
using T = float;
using expected_type = cute::gmem_ptr<T*>;
auto gmem_p0 = cute::make_gmem_ptr<T>(nullptr);
static_assert(cute::is_same_v<decltype(gmem_p0), expected_type>);
}
{
using T = float const;
using expected_type = cute::gmem_ptr<T*>;
auto gmem_p0 = cute::make_gmem_ptr<T>(nullptr);
static_assert(cute::is_same_v<decltype(gmem_p0), expected_type>);
}
}
| test/unit/cute/core/pointer.cpp/0 | {
"file_path": "test/unit/cute/core/pointer.cpp",
"repo_id": "test",
"token_count": 1348
} | 49 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/epilogue/thread/linear_combination_drelu.h"
#include "cutlass/gemm/warp/default_mma_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_with_reduction.h"
#include "cutlass/epilogue/threadblock/epilogue_with_reduction.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "epilogue_with_reduction_testbed.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Disable selected tests on CUDA 11.1
//
//
#define ENABLE_BLOCKED_TESTS (!(__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ == 1))
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_64x64_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_64x64_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_128x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_128x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_128x64_64x32x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if ENABLE_BLOCKED_TESTS
TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_128x64_64x32x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_64x128_32x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_64x128_32x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_128x256_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_128x256_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<128, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f32_tensor_op_256x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = float;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<256, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_Epilogue_with_reduction_threadblock, f16_tensor_op_256x128_64x64x8) {
//
// Define the warp-level matrix multiply
//
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using ElementCompute = float;
int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value;
int const kPartitionsK = 1;
using Shape = cutlass::gemm::GemmShape<256, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::half_t;
using ElementC = ElementAccumulator;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous<
cutlass::sizeof_bits<Element>::value, 64>;
using LayoutC = cutlass::layout::RowMajor;
using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp<
WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
LayoutC>::Type;
//
// Output operator
//
using OutputOp = cutlass::epilogue::thread::LinearCombinationDRelu<
ElementAccumulator,
ElementAccumulator,
ElementOutput,
ElementOutput,
kElementsPerAccess
>;
using ReductionOp = cutlass::plus<ElementAccumulator>;
//
// Define the epilogue
//
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueWithReductionTensorOp<
Shape,
WarpMmaTensorOp,
kPartitionsK,
ElementOutput,
OutputOp,
ReductionOp,
kElementsPerAccess
>::Epilogue;
//
// Instantiate epilogue
//
EpilogueWithReductionTestbed<Epilogue> testbed;
bool passed = testbed.run_all();
EXPECT_TRUE(passed);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/threadblock/epilogue_with_reduction_tensor_op.cu/0 | {
"file_path": "test/unit/epilogue/threadblock/epilogue_with_reduction_tensor_op.cu",
"repo_id": "test",
"token_count": 8687
} | 50 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface with:
        A: row major, of type FE4M3 or FE5M2
        B: column major, of type FE4M3 or FE5M2
        C: row major, of type FE4M3 or FE5M2
Accum: F32
*/
#include <iostream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/linear_combination_generic_with_scaling.h"
#include "cutlass/gemm/device/gemm_universal_with_absmax.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "testbed.h"
#include "testbed_with_absmax.h"
#if defined(CUTLASS_ARCH_MMA_SM89_SUPPORTED)
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
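  // The epilogue applies the usual alpha/beta linear combination and activation together with
  // tensor-wise scale factors, and records the absolute maximum of the outputs, as consumed
  // by FP8 scaling workflows.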
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_fastacc_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
static int const kAlignment = 16;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages,
kAlignment, kAlignment, cutlass::arch::OpMultiplyAddFastAccum
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, relu_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::ReLu,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::ReLu>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe5m2n_fe4m3t_tensor_op_f32, identity_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e5m2_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe5m2t_fe4m3n_fe4m3t_tensor_op_f32, identity_128x256x64_64x64x64) {
using ElementA = cutlass::float_e5m2_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe5m2t_fe5m2n_fe4m3t_tensor_op_f32, identity_128x256x64_64x64x64) {
using ElementA = cutlass::float_e5m2_t;
using ElementB = cutlass::float_e5m2_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe5m2t_tensor_op_f32, identity_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e5m2_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe5m2t_fe5m2n_fe5m2t_tensor_op_f32, identity_diff_aux_output_types_128x256x64_64x64x64) {
using ElementA = cutlass::float_e5m2_t;
using ElementB = cutlass::float_e5m2_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = cutlass::float_e5m2_t;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_128x128x64_32x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 128, 64>, cutlass::gemm::GemmShape<32, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_noScale_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = ElementOutput;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
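  // This variant runs the same kernel with the A, B, and C scale factors disabled.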
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>(
/* scaleA = */false,
/* scaleB = */false,
/* scaleC = */false
);
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
TEST(SM89_Device_Gemm_fe4m3t_fe4m3n_fe4m3t_tensor_op_f32, identity_noAux_128x256x64_64x64x64) {
using ElementA = cutlass::float_e4m3_t;
using ElementB = cutlass::float_e4m3_t;
using ElementOutput = cutlass::float_e4m3_t;
using ElementAuxOutput = float;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
static int const kStages = 3;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombinationGenericWithScalingAndAbsMax<
cutlass::epilogue::thread::Identity,
ElementOutput,
ElementAuxOutput,
128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator,
ElementAccumulator
>;
using Gemm = cutlass::gemm::device::GemmUniversalWithAbsMax<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC,
ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm89,
cutlass::gemm::GemmShape<128, 256, 64>, cutlass::gemm::GemmShape<64, 64, 64>, cutlass::gemm::GemmShape<16, 8, 32>,
EpilogueOutputOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, kStages
>;
bool passed = test::gemm::device::TestAllGemmWithAbsmax<Gemm, test::gemm::device::Testbed<Gemm>, cutlass::epilogue::thread::Identity>();
EXPECT_TRUE(passed);
}
////////////////////////////////////////////////////////////////////////////////
#endif // CUTLASS_ARCH_MMA_SM89_SUPPORTED
| test/unit/gemm/device/gemm_f8t_f8n_f8t_tensor_op_f32_sm89.cu/0 | {
"file_path": "test/unit/gemm/device/gemm_f8t_f8n_f8t_tensor_op_f32_sm89.cu",
"repo_id": "test",
"token_count": 6677
} | 51 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/host_reorder.h"
namespace test {
namespace gemm {
namespace device {
////////////////////////////////////////////////////////////////////////////////
template <typename Gemm, int InterleavedK>
struct InterleavedTestbed {
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using ElementCompute = typename Gemm::GemmKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
//
// Methods
//
InterleavedTestbed(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
cutlass::reference::host::TensorFillRandomUniform(
view, seed, 2, -2, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Waives test if CUDA device is insufficient
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
//
// Allocate the GEMM workspace
//
cutlass::HostTensor<
typename Gemm::ElementA,
typename Gemm::LayoutA> tensor_A(problem_size.mk());
cutlass::HostTensor<
typename Gemm::ElementB,
typename Gemm::LayoutB> tensor_B(problem_size.kn());
cutlass::HostTensor<
typename Gemm::ElementB,
typename Gemm::LayoutB> tensor_B_reordered(problem_size.kn());
cutlass::HostTensor<
typename Gemm::ElementC,
typename Gemm::LayoutC> tensor_C(problem_size.mn());
cutlass::HostTensor<
typename Gemm::ElementC,
typename Gemm::LayoutC> tensor_D(problem_size.mn());
cutlass::HostTensor<
typename Gemm::ElementC,
typename Gemm::LayoutC> reference_D(problem_size.mn(), false);
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
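    // The interleaved kernel expects operand B in a column-reordered (interleaved) layout, so
    // reorder the canonical B into tensor_B_reordered for the device; the host reference GEMM
    // below still consumes the original tensor_B.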
cutlass::reorder_column<InterleavedK>(
tensor_B_reordered.host_ref(), tensor_B.host_ref(), problem_size);
cutlass::reference::host::TensorCopy(
reference_D.host_view(),
tensor_C.host_view());
tensor_A.sync_device();
tensor_B_reordered.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments{
problem_size,
tensor_A.device_ref(),
tensor_B_reordered.device_ref(),
tensor_C.device_ref(),
tensor_D.device_ref(),
{alpha, beta}
};
Gemm gemm_op;
cutlass::Status status = gemm_op.initialize(arguments);
EXPECT_TRUE(status == cutlass::Status::kSuccess);
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
//
// Verify
//
cutlass::reference::host::Gemm<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
typename Gemm::ElementC, typename Gemm::LayoutC, ElementCompute,
ElementAccumulator, typename Gemm::Operator>
reference_gemm;
reference_gemm(
problem_size,
alpha,
tensor_A.host_ref(),
tensor_B.host_ref(),
beta,
reference_D.host_ref(),
ElementAccumulator(0)
);
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(
reference_D.host_view(),
tensor_D.host_view());
EXPECT_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_Gemm_device_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Gemm::ThreadblockShape::kM << "x"
<< Gemm::ThreadblockShape::kN << "x"
<< Gemm::ThreadblockShape::kK << "_"
<< Gemm::WarpShape::kM << "x"
<< Gemm::WarpShape::kN << "x"
<< Gemm::WarpShape::kK << ".txt";
std::ofstream file(fname.str());
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nB_reordered =\n" << tensor_B_reordered.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\n\nReference =\n" << reference_D.host_view()
<< "\nComputed =\n" << tensor_D.host_view();
}
return passed;
}
/// Runs a set of problem sizes
bool run_all() {
bool passed = true;
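    // Problem extents are chosen as offsets of the interleaving factor so that each dimension
    // stays a multiple of InterleavedK (assuming InterleavedK divides 256 and 512, as it does
    // for the usual power-of-two interleave factors).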
int problem_size_m[] = {
InterleavedK, 256 + InterleavedK, 512 + InterleavedK
};
int problem_size_n[] = {
InterleavedK, 256 + InterleavedK, 512 + InterleavedK
};
int problem_size_k[] = {
InterleavedK, 256 + InterleavedK, 512 + InterleavedK
};
double problem_alpha[] = {
1.0
};
double problem_beta[] = {
2.0
};
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int k : problem_size_k) {
for (double alpha : problem_alpha) {
for (double beta : problem_beta) {
passed = run(
{m, n, k},
ElementCompute(alpha),
ElementCompute(beta)
);
if (!passed) {
return false;
}
}
}
}
}
}
return true;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_interleaved.h/0 | {
"file_path": "test/unit/gemm/device/testbed_interleaved.h",
"repo_id": "test",
"token_count": 3972
} | 52 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "../../common/cutlass_unit_test.h"
#include "cutlass/core_io.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/gemm/kernel/default_gemv.h"
#include "cutlass/gemm/kernel/gemv_batched_strided.h"
namespace test {
namespace gemm {
namespace kernel {
template<typename ThreadBlockShape_,
typename ThreadShape_,
typename ElementAB_,
typename ElementAccumulator_,
typename ElementCD_,
typename LayoutA_,
typename LayoutB_,
typename LayoutCD_,
int THREAD_B = 1, // batch tile size
bool DEBUG=false>
void batched_gemv_kernel_test(cutlass::gemm::BatchedGemmCoord problem_size,
ElementCD_ alpha = ElementCD_(1),
ElementCD_ beta = ElementCD_(0),
bool perf_test = false,
int perf_test_iter = 1)
{
using ThreadBlockShape = ThreadBlockShape_;
using ThreadShape = ThreadShape_;
using ElementA = ElementAB_;
using LayoutA = LayoutA_;
using ElementB = ElementAB_;
using LayoutB = LayoutB_;
using ElementAccumulator = ElementCD_;
using ElementCD = ElementCD_;
using LayoutCD = LayoutCD_;
using GemvKernel = cutlass::gemm::kernel::DefaultGemv<ThreadBlockShape,
ThreadShape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementCD,
LayoutCD,
ElementAccumulator>;
using ThreadBlockGemv = typename GemvKernel::ThreadBlockGemv;
using ThreadBlockSwizzle = typename GemvKernel::ThreadBlockSwizzle;
if (DEBUG)
{
problem_size = cutlass::gemm::BatchedGemmCoord(
problem_size.m(), problem_size.n(), problem_size.k(), 1);
}
// Create host tensors that will be the backing store for the batches
// Note that no device memory is initially allocated
cutlass::HostTensor<ElementA, LayoutA> matrix_A({problem_size.m(), problem_size.k()}, false);
cutlass::HostTensor<ElementB, LayoutB> matrix_B({problem_size.k(), problem_size.n()}, false);
cutlass::HostTensor<ElementCD, LayoutCD> matrix_C_computed({problem_size.m(), problem_size.n()}, false);
cutlass::HostTensor<ElementCD, LayoutCD> matrix_C_reference({problem_size.m(), problem_size.n()}, false);
// Reserve memory for the batch of tensors
matrix_A.reserve(problem_size.m()*problem_size.k()*problem_size.batch());
matrix_B.reserve(problem_size.n()*problem_size.k()*problem_size.batch());
matrix_C_computed.reserve(problem_size.m()*problem_size.n()*problem_size.batch());
matrix_C_reference.reserve(problem_size.m()*problem_size.n()*problem_size.batch(), false);
  // Fill each tensor batch
const int seed = 9876;
for (int b = 0; b < problem_size.batch(); b++)
{
if(DEBUG)
{
cutlass::reference::host::BlockFillSequential(
matrix_A.host_data_ptr_offset(b*matrix_A.capacity()), matrix_A.capacity());
cutlass::reference::host::BlockFillSequential(
matrix_B.host_data_ptr_offset(b*matrix_B.capacity()), matrix_B.capacity());
}
else
{
cutlass::reference::host::TensorFillRandomUniform(
matrix_A.host_view(b*matrix_A.capacity()),
seed + 1660,
8,
-8,
0
);
cutlass::reference::host::TensorFillRandomUniform(
matrix_B.host_view(b*matrix_B.capacity()),
seed + 1880,
8,
-8,
0
);
}
cutlass::reference::host::TensorFill(matrix_C_computed.host_view(b*matrix_C_computed.capacity()));
cutlass::reference::host::TensorFill(matrix_C_reference.host_view(b*matrix_C_reference.capacity()));
}
matrix_A.sync_device();
matrix_B.sync_device();
matrix_C_computed.sync_device();
ThreadBlockSwizzle swizzle;
cutlass::gemm::BatchedGemmCoord tiled_size{ThreadBlockShape::kM,
ThreadBlockShape::kN,
problem_size.k(), // no split-k
DEBUG ? 1 : THREAD_B };
cutlass::gemm::BatchedGemmCoord tiled_shape = swizzle.get_tiled_shape(problem_size, tiled_size);
#if 0
printf("tiled_size = %d %d %d %d\n", tiled_size.m(), tiled_size.n(), tiled_size.k(), tiled_size.batch());
printf("tiled_shape = %d %d %d %d\n", tiled_shape.m(), tiled_shape.n(), tiled_shape.k(), tiled_shape.batch());
#endif
// No split-k
EXPECT_EQ(tiled_size.k(), problem_size.k());
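  // Launch configuration: block.x covers the N extent of one threadblock tile in units of
  // ThreadShape::kN, block.y covers the batch tile, and block.z is 1 since K is not split.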
dim3 grid = swizzle.get_grid_shape(tiled_shape);
dim3 block(tiled_size.n() / ThreadShape::kN, tiled_size.batch(), tiled_size.k() / problem_size.k());
// Some sanity checks
EXPECT_TRUE( block.x*block.y*block.z <= 1024 );
EXPECT_TRUE( block.x <= 1024 );
EXPECT_TRUE( block.y <= 1024 );
EXPECT_TRUE( block.z <= 64 );
#if 0
printf("grid dim = %d, %d, %d\n", grid.x, grid.y, grid.z);
printf("block dim = %d, %d, %d\n", block.x, block.y, block.z);
#endif
cudaError_t result;
cudaEvent_t start_event, end_event;
for (int iter = 0; iter < (perf_test ? (perf_test_iter+1) : 1); ++iter)
{
if (perf_test && iter == 1)
{
result = cudaEventCreate(&start_event);
EXPECT_EQ(result, cudaSuccess);
result = cudaEventCreate(&end_event);
EXPECT_EQ(result, cudaSuccess);
result = cudaEventRecord(start_event);
EXPECT_EQ(result, cudaSuccess);
}
if (beta == ElementCD(0))
{
if (alpha == ElementCD(1))
{
cutlass::gemm::kernel::GemvBatchedStrided<GemvKernel><<< grid, block >>>(
problem_size,
matrix_A.device_ref(),
matrix_A.capacity(),
matrix_B.device_ref(),
matrix_B.capacity(),
matrix_C_computed.device_ref(),
matrix_C_computed.capacity()
);
}
else
{
cutlass::gemm::kernel::GemvBatchedStrided<GemvKernel><<< grid, block >>>(
problem_size,
alpha,
matrix_A.device_ref(),
matrix_A.capacity(),
matrix_B.device_ref(),
matrix_B.capacity(),
matrix_C_computed.device_ref(),
matrix_C_computed.capacity()
);
}
}
else
{
cutlass::gemm::kernel::GemvBatchedStrided<GemvKernel, ElementCD, false><<< grid, block >>>(
problem_size,
alpha,
beta,
matrix_A.device_ref(),
matrix_A.capacity(),
matrix_B.device_ref(),
matrix_B.capacity(),
matrix_C_computed.device_ref(),
matrix_C_computed.capacity(),
matrix_C_computed.device_ref(),
matrix_C_computed.capacity()
);
}
if (iter == 0)
{
result = cudaGetLastError();
EXPECT_EQ(result, cudaSuccess) << " kernel error: " << cudaGetErrorString(result);
}
}
if (perf_test)
{
result = cudaEventRecord(end_event);
EXPECT_EQ(result, cudaSuccess);
}
result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " kernel error: " << cudaGetErrorString(result);
if (perf_test)
{
float ms;
result = cudaEventElapsedTime(&ms, start_event, end_event);
EXPECT_EQ(result, cudaSuccess);
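    // With the elapsed time in milliseconds, dividing the op and byte counts by 1.0e6 and by
    // the per-iteration time yields GFLOP/s and GB/s directly.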
double flops = (double(problem_size.m()) *
double(problem_size.n()) *
double(problem_size.k()) *
double(problem_size.batch()) * 2); // 2 for MAC
double read_bytes = double(problem_size.batch()) * (sizeof(ElementA)*double(problem_size.m())*double(problem_size.k()) +
sizeof(ElementB)*double(problem_size.k())*double(problem_size.n()));
double write_bytes = double(problem_size.batch()) * (sizeof(ElementCD)*double(problem_size.m())*double(problem_size.n()));
double avg_runtime = double(ms) / perf_test_iter;
double gflops_per_sec = flops / 1.0e6 / avg_runtime;
double read_bandwidth = read_bytes / 1.0e6 / avg_runtime;
double write_bandwidth = write_bytes / 1.0e6 / avg_runtime;
std::cout << "\n\nProblem size: "
<< problem_size.m()
<< " x " << problem_size.n()
<< " x " << problem_size.k()
<< " x " << problem_size.batch()
<< std::endl;
std::cout << " GFLOPs: " << gflops_per_sec << std::endl;
std::cout << "BW (R/W): " << read_bandwidth << " / " << write_bandwidth << " GB/sec" << std::endl;
std::cout << " Runtime: " << avg_runtime << " ms" << std::endl;
}
else
{
matrix_C_computed.sync_host();
// Compute the batched gemms
for (int b = 0; b < problem_size.batch(); b++)
{
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementCD, LayoutCD, ElementCD,
ElementCD>
reference_gemm;
reference_gemm(
problem_size.mnk(), alpha,
matrix_A.host_ref(b * matrix_A.capacity()),
matrix_B.host_ref(b * matrix_B.capacity()), beta,
matrix_C_reference.host_ref(b * matrix_C_computed.capacity()));
bool passed = cutlass::reference::host::TensorEquals(
matrix_C_computed.host_view(b * matrix_C_computed.capacity()),
matrix_C_reference.host_view(b * matrix_C_reference.capacity()));
EXPECT_TRUE(passed)
//<< "A:\n" << matrix_A.host_view() << "\n"
//<< "B:\n" << matrix_B.host_view() << "\n"
<< "Batch: " << b << "\n"
<< "Reference:\n"
<< matrix_C_reference.host_view(b * matrix_C_reference.capacity())
<< "\n"
<< "Computed:\n"
<< matrix_C_computed.host_view(b * matrix_C_computed.capacity())
<< "\n";
}
}
}
template<typename ThreadBlockShape_,
typename ThreadShape_,
typename ElementAB_,
typename ElementAccumulator_,
typename ElementCD_,
typename LayoutA_,
typename LayoutB_,
typename LayoutCD_,
int THREAD_B = 1, // batch tile size
bool DEBUG=false>
void batched_gemv_kernel_perf_test(cutlass::gemm::BatchedGemmCoord problem_size,
ElementCD_ alpha = ElementCD_(1),
ElementCD_ beta = ElementCD_(0),
int iter = 50)
{
batched_gemv_kernel_test<ThreadBlockShape_,
ThreadShape_,
ElementAB_,
ElementAccumulator_,
ElementCD_,
LayoutA_,
LayoutB_,
LayoutCD_,
THREAD_B,
DEBUG>(problem_size, alpha, beta, true, iter);
}
} // namespace kernel
} // namespace gemm
} // namespace test
| test/unit/gemm/kernel/testbed_gemv.h/0 | {
"file_path": "test/unit/gemm/kernel/testbed_gemv.h",
"repo_id": "test",
"token_count": 7083
} | 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit testbed for kernel-level GEMM
*/
#pragma once
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/core_io.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
namespace test {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <typename Mma>
__global__ void kernel_multistage_mma(cutlass::gemm::GemmCoord problem_size,
typename Mma::IteratorA::Params params_A,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::Params params_B,
typename Mma::IteratorB::TensorRef ref_B,
typename Mma::ElementC *ptr_C,
typename Mma::LayoutC::Stride::Index ldc) {
// Shared storage needed by threadblock-scoped matrix multiply-accumulate
// Dynamic shared memory base pointer
extern __shared__ int GemmSharedStorageBase[];
// Declare pointer to dynamic shared memory.
typename Mma::SharedStorage *shared_storage =
reinterpret_cast<typename Mma::SharedStorage *>(GemmSharedStorageBase);
// Compute threadblock location
cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y),
0};
cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.k()};
cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(),
tb_tile_offset.n() * Mma::Shape::kN};
// Compute position within threadblock
int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(params_A, ref_A.data(),
{problem_size.m(), problem_size.k()},
tb_thread_id, tb_offset_A);
typename Mma::IteratorB iterator_B(params_B, ref_B.data(),
{problem_size.k(), problem_size.n()},
tb_thread_id, tb_offset_B);
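  // Broadcast threadIdx.y from lane 0 so the warp index is uniform across the warp.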
int warp_id = __shfl_sync(0xffffffff, threadIdx.y, 0);
// Construct thread-scoped matrix multiply
Mma mma(*shared_storage, tb_thread_id, warp_id, threadIdx.x);
typename Mma::FragmentC accum;
accum.clear();
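  // Number of threadblock-scoped K tiles, rounded up.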
int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
// Output results
typename Mma::Operator::IteratorC iterator_C({ptr_C, ldc}, threadIdx.x);
iterator_C.add_tile_offset(
{(tb_tile_offset.m() * Mma::WarpCount::kM) +
(warp_id % Mma::WarpCount::kM),
(tb_tile_offset.n() * Mma::WarpCount::kN) +
(warp_id / Mma::WarpCount::kM)});
iterator_C.store(accum);
}
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Threadblock-level matrix multiply-accumulate
typename MmaCore_>
struct Testbed {
/// Threadblock-level GEMM implementation
using MmaCore = MmaCore_;
using ThreadblockShape = typename MmaCore::Shape;
using WarpShape = typename MmaCore::WarpShape;
using InstructionShape = typename MmaCore::InstructionShape;
using ElementA = typename MmaCore::ElementA;
using LayoutA = typename MmaCore::LayoutA;
using ElementB = typename MmaCore::ElementB;
using LayoutB = typename MmaCore::LayoutB;
using ElementC = typename MmaCore::ElementC;
using LayoutC = typename MmaCore::LayoutC;
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeA = cutlass::Array<ElementA, ThreadMapA::kElementsPerAccess>;
using AccessTypeB = cutlass::Array<ElementB, ThreadMapB::kElementsPerAccess>;
static int const Stages = MmaCore::kStages;
static cutlass::arch::CacheOperation::Kind const CacheOpA =
MmaCore::kCacheOpA;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
MmaCore::kCacheOpB;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;
// Define the threadblock-scoped pipelined matrix multiply
using Mma = cutlass::gemm::threadblock::MmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
CacheOpA, IteratorB, typename MmaCore::SmemIteratorB, CacheOpB, ElementC,
LayoutC, typename MmaCore::MmaPolicy, Stages>;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> matrix_A;
cutlass::HostTensor<ElementB, LayoutB> matrix_B;
cutlass::HostTensor<ElementC, LayoutC> matrix_C_computed;
cutlass::HostTensor<ElementC, LayoutC> matrix_C_reference;
cutlass::gemm::GemmCoord problem_size;
float alpha, beta;
//
// Methods
//
/// Allocates workspace in device memory
Testbed(int m, int n, int k, float alpha_ = float(1), float beta_ = float(0))
: problem_size(m, n, k), alpha(alpha_), beta(beta_) {
matrix_A.reset(cutlass::make_Coord(m, k));
matrix_B.reset(cutlass::make_Coord(k, n));
matrix_C_computed.reset(cutlass::make_Coord(m, n));
matrix_C_reference.reset(cutlass::make_Coord(m, n), false);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
    //
    // Query device properties; the shared memory capacity check itself happens in run(),
    // which waives the test if the kernel's dynamic shared memory cannot be configured.
    //
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
return true;
}
/// Runs the test
bool run(
dim3 grid, dim3 block,
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
if (!sufficient()) {
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_A.host_view(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_A.host_data(),
matrix_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(matrix_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_B.host_view(), seed + 16, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_B.host_data(),
matrix_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(matrix_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(matrix_C_computed.host_view());
cutlass::reference::host::TensorFill(matrix_C_reference.host_view());
matrix_A.sync_device();
matrix_B.sync_device();
matrix_C_computed.sync_device();
typename IteratorA::Params params_A(matrix_A.layout());
typename IteratorB::Params params_B(matrix_B.layout());
cudaError_t result;
int smem_size = int(sizeof(typename Mma::SharedStorage));
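    // Dynamic shared memory allocations larger than 48 KB require an explicit opt-in via
    // cudaFuncSetAttribute; if the device cannot provide it, the test is waived.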
if (smem_size >= (48 << 10)) {
result = cudaFuncSetAttribute(
test::gemm::threadblock::kernel_multistage_mma<Mma>,
cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);
if (result != cudaSuccess) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
result = cudaFuncSetAttribute(
test::gemm::threadblock::kernel_multistage_mma<Mma>,
cudaFuncAttributePreferredSharedMemoryCarveout, 100);
if (result != cudaSuccess) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
}
test::gemm::threadblock::kernel_multistage_mma<Mma>
<<<grid, block, smem_size, 0>>>(
problem_size, params_A, matrix_A.device_ref(), params_B,
matrix_B.device_ref(), matrix_C_computed.device_data(),
matrix_C_computed.layout().stride(0));
//
// Check error code
//
result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess)
<< " kernel error: " << cudaGetErrorString(result);
matrix_C_computed.sync_host();
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC> reference_gemm;
reference_gemm(
problem_size, ElementC(alpha), matrix_A.host_view(),
matrix_B.host_view(), ElementC(beta), matrix_C_reference.host_view());
bool passed = cutlass::reference::host::TensorEquals(
matrix_C_computed.host_view(), matrix_C_reference.host_view());
EXPECT_TRUE(passed);
if (!passed && CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cout
<< __FILE__ << ":" << __LINE__ << " "
<< "A:\n" << matrix_A.host_view() << "\n"
<< "B:\n" << matrix_B.host_view() << "\n"
<< "Reference:\n"
<< matrix_C_reference.host_view() << "\n"
<< "Computed:\n"
<< matrix_C_computed.host_view() << "\n";
}
EXPECT_GT(cutlass::reference::host::TensorNorm(matrix_C_reference.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(matrix_C_computed.host_view()), 0);
return passed;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace test
| test/unit/gemm/threadblock/mma_multistage_testbed.h/0 | {
"file_path": "test/unit/gemm/threadblock/mma_multistage_testbed.h",
"repo_id": "test",
"token_count": 5489
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "cutlass/cutlass.h"
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/gemm/warp/default_mma_complex_tensor_op.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
#include "testbed.h"
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
////////////////////////////////////////////////////////////////////////////////////////////////////
// complex<double> * complex<double> => complex<double>
// Input data type: complex<double>
// Math instruction: mma.sync.aligned.m8n8k4.f64.f64.f64.f64
// Output data type: complex<double>
///////////////////////////////////////////////////////////////////////////////////////////////////
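// Informal background (not part of the original test comments): a complex
// multiply-accumulate decomposes into four real multiply-accumulates,
//
//   (a + b*i) * (c + d*i) = (a*c - b*d) + (a*d + b*c)*i
//
// so each complex<double> warp-level operation above is expected to be realized
// by several real-valued f64 mma.sync instructions per accumulator tile.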
TEST(SM80_warp_gemm_complex_tensor_op_f64, 8x8x4_8x8x4_nt) {
using Shape = cutlass::gemm::GemmShape<8, 8, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<8, 8, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 16x16x4_8x8x4_nt) {
using Shape = cutlass::gemm::GemmShape<16, 16, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 16x32x4_8x8x4_nt) {
using Shape = cutlass::gemm::GemmShape<16, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<16, 32, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x16x4_8x8x4_nt) {
using Shape = cutlass::gemm::GemmShape<32, 16, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<32, 16, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x4_8x8x4_nt) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x4_8x8x4_nh) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
cutlass::ComplexTransform::kConjugate
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x4_8x8x4_ct) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kConjugate,
cutlass::ComplexTransform::kNone
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 4> >().run();
}
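// Naming note (derived from the tests above): the "nt"/"tn" suffixes encode the
// operand layouts (column-major A with row-major B vs. row-major A with
// column-major B), while "nh" and "ct" mark the ComplexTransform arguments --
// "h" conjugates B (kNone, kConjugate) and "c" conjugates A (kConjugate, kNone).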
TEST(SM80_warp_gemm_complex_tensor_op_f64, 8x8x4_8x8x4_tn) {
using Shape = cutlass::gemm::GemmShape<8, 8, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<8, 8, 4> >().run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f64, 16x16x4_8x8x4_tn) {
using Shape = cutlass::gemm::GemmShape<16, 16, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 4> >().run();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
// complex<float> * complex<float> => complex<float>
// Input data type: complex<float>
// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
// Output data type: complex<float>
// Shared memory layout: Congruous
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x8_16x8x8_nt) {
using Shape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x16_16x8x8_nt) {
using Shape = cutlass::gemm::GemmShape<16, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x32x8_16x8x8_nt) {
using Shape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<16, 32, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x16x8_16x8x8_nt) {
using Shape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 16, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_nt) {
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_nh) {
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kNone,
cutlass::ComplexTransform::kConjugate
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_ct) {
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b;
using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor,
cutlass::ComplexTransform::kConjugate,
cutlass::ComplexTransform::kNone
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// complex<float> * complex<float> => complex<float>
// Input data type: complex<float>
// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
// Output data type: complex<float>
// Shared memory layout: Crosswise
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x8_16x8x8_tn) {
using Shape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 8> >()
.run();
}
// KNOWN FAILURE: the crosswise complex<float> TN test below (mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32) fails for k = 2 * 8 = 16
TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x16_16x8x8_tn) {
using Shape = cutlass::gemm::GemmShape<16, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_tn) {
using Shape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x64x8_16x8x8_tn) {
using Shape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 64, 8> >()
.run();
}
TEST(SM80_warp_gemm_complex_tensor_op_f32, 64x32x8_16x8x8_tn) {
using Shape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Element = cutlass::complex<float>;
using ElementC = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise;
using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<64, 32, 8> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x8_8x8x4_tn) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x8_8x8x4_nt) {
using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Element = cutlass::complex<double>;
using ElementC = cutlass::complex<double>;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
Shape,
InstructionShape,
Element,
LayoutA,
Element,
LayoutB,
ElementC,
cutlass::layout::RowMajor
>::Type;
test::gemm::warp::TransformedTestbedComplex<
MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >()
.run();
}
////////////////////////////////////////////////////////////////////////////////////////////////
#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
| test/unit/gemm/warp/gemm_complex_sm80.cu/0 | {
"file_path": "test/unit/gemm/warp/gemm_complex_sm80.cu",
"repo_id": "test",
"token_count": 8445
} | 55 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level Reduction
*/
#pragma once
#include "cutlass/reduction/thread/reduce.h"
#include "cutlass/layout/vector.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
namespace test {
namespace reduction {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the reduction
template <
/// Data type of elements
typename Element,
/// Number of elements
int N
>
struct Testbed_reduce_host {
/// Thread-level reduction operator
using Reduce = cutlass::reduction::thread::Reduce<
cutlass::plus<Element>,
cutlass::Array<Element, N>
>;
//
// Data members
//
cutlass::Array<Element, N> tensor_in;
cutlass::Array<Element, 1> reduced_tensor_computed;
cutlass::Array<Element, 1> reduced_tensor_reference;
//
// Methods
//
  /// Initializes host-side arrays
Testbed_reduce_host() {
tensor_in.clear();
reduced_tensor_computed.clear();
reduced_tensor_reference.clear();
}
/// Runs the test
bool run() {
//
// initialize memory
//
for(int i = 0; i < N; i++)
tensor_in.at(i) = Element(i);
Reduce reduce;
cutlass::Array<Element, 1> *out_ptr = &reduced_tensor_computed;
out_ptr[0] = reduce(tensor_in);
//
// Reference implementation
//
Element e(0);
for (int i = 0; i < N; i++)
e = e + Element(i);
reduced_tensor_reference.at(0) = e;
//
// Verify equivalence
//
// compare
bool passed = reduced_tensor_reference[0] == reduced_tensor_computed[0];
EXPECT_TRUE(passed)
<< "Expected = " << float(reduced_tensor_reference.at(0)) << "\n\n"
<< "Actual = " << float(reduced_tensor_computed.at(0)) << "\n\n"
<< std::endl;
return passed;
}
};
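// Illustrative usage (sketch only; the actual TEST() bodies live in the
// accompanying .cu files): an 8-element float reduction on the host would be
// exercised as
//
//   test::reduction::thread::Testbed_reduce_host<float, 8> testbed;
//   EXPECT_TRUE(testbed.run());
//
// which checks the thread-level Reduce against the serial reference loop above.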
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Thread-level reduction kernel
template <typename Element, int N>
__global__ void kernel_reduce(Element const *array_in, Element *result) {
/// Thread-level reduction operator
using Reduce = cutlass::reduction::thread::Reduce<
cutlass::plus<Element>,
cutlass::Array<Element, N>
>;
Reduce reduce;
auto ptr_in = reinterpret_cast<cutlass::Array<Element , N> const *>(array_in);
auto result_ptr = reinterpret_cast<cutlass::Array<Element , 1> *>(result);
auto in = *ptr_in;
result_ptr[0] = reduce(in);
}
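// Note: kernel_reduce exercises a thread-scoped reduction, so the device
// testbed below launches it with a 1x1 grid and a single-thread block; all N
// elements are reduced by one thread.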
/// Structure to compute the reduction
template <
/// Data type of elements
typename Element,
/// Number of elements
int N
>
struct Testbed_reduce_device {
using Layout = cutlass::layout::PackedVectorLayout;
//
// Data members
//
cutlass::HostTensor<Element, Layout> tensor_in;
cutlass::HostTensor<Element, Layout> reduced_tensor_computed;
cutlass::HostTensor<Element, Layout> reduced_tensor_reference;
//
// Methods
//
/// Allocates workspace in device memory
Testbed_reduce_device() {
tensor_in.reset(cutlass::make_Coord(N), true);
reduced_tensor_computed.reset(cutlass::make_Coord(1), true);
reduced_tensor_reference.reset(cutlass::make_Coord(1), true);
}
/// Runs the test
bool run() {
//
// initialize memory
//
cutlass::reference::host::TensorFill(
tensor_in.host_view(),
Element(1)
);
cutlass::reference::host::TensorFill(
reduced_tensor_computed.host_view(),
Element(0)
);
cutlass::reference::host::TensorFill(
reduced_tensor_reference.host_view(),
Element(N)
);
tensor_in.sync_device();
reduced_tensor_computed.sync_device();
reduced_tensor_reference.sync_device();
/// call the kernel
kernel_reduce<Element, N><<< dim3(1, 1), dim3(1, 1, 1) >>> (
tensor_in.device_data(),
reduced_tensor_computed.device_data()
);
// verify no errors
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result);
if (result != cudaSuccess) {
return false;
}
// Copy back results
reduced_tensor_computed.sync_host();
// Verify equivalence
bool passed = cutlass::reference::host::TensorEquals(
reduced_tensor_computed.host_view(),
reduced_tensor_reference.host_view()
);
EXPECT_TRUE(passed)
<< "Expected = " << reduced_tensor_reference.host_view() << "\n\n"
<< "Actual = " << reduced_tensor_computed.host_view() << "\n\n"
<< std::endl;
return passed;
}
};
} // namespace thread
} // namespace reduction
} // namespace test
| test/unit/reduction/thread/testbed.h/0 | {
"file_path": "test/unit/reduction/thread/testbed.h",
"repo_id": "test",
"token_count": 2242
} | 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Implements OperationProfiler, the base class the CUTLASS profiler uses to select, verify, and profile operations in the manifest
*/
#include <algorithm>
#include <stdexcept>
#include <iomanip>
#include <cstring>
#include <fstream>
#include <sstream>
#ifdef __unix__
#include <unistd.h>
#elif defined(_WIN32) || defined(WIN32)
#include <windows.h>
#else
// sleep not supported
#endif
#include "cutlass/profiler/options.h"
#include "cutlass/profiler/operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
#include "cutlass/trace.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
///////////////////////////////////////////////////////////////////////////////////////////////////
OperationProfiler::OperationProfiler(): kind_(library::OperationKind::kInvalid) { }
/// Ctor
OperationProfiler::OperationProfiler(
Options const &options,
library::OperationKind kind,
ArgumentDescriptionVector const &arguments,
ProviderVector const & verification_providers
):
kind_(kind), arguments_(arguments) {
ArgumentDescriptionVector tile_description_arguments{
{ArgumentTypeID::kEnumerated, {"op_class", "opcode-class"}, "Class of math instruction (simt, tensorop, wmmatensorop, wmma)"},
{ArgumentTypeID::kEnumerated, {"accum", "accumulator-type"}, "Math instruction accumulator data type"},
{ArgumentTypeID::kInteger, {"cta_m", "threadblock-shape::m"}, "Threadblock shape in the M dimension"},
{ArgumentTypeID::kInteger, {"cta_n", "threadblock-shape::n"}, "Threadblock shape in the N dimension"},
{ArgumentTypeID::kInteger, {"cta_k", "threadblock-shape::k"}, "Threadblock shape in the K dimension"},
{ArgumentTypeID::kInteger, {"cluster_m", "cluster-shape::m"}, "Cluster shape in the M dimension"},
{ArgumentTypeID::kInteger, {"cluster_n", "cluster-shape::n"}, "Cluster shape in the N dimension"},
{ArgumentTypeID::kInteger, {"cluster_k", "cluster-shape::k"}, "Cluster shape in the K dimension"},
{ArgumentTypeID::kInteger, {"stages", "threadblock-stages"}, "Number of stages of threadblock-scoped matrix multiply"},
{ArgumentTypeID::kInteger, {"warps_m", "warp-count::m"}, "Number of warps within threadblock along the M dimension"},
{ArgumentTypeID::kInteger, {"warps_n", "warp-count::n"}, "Number of warps within threadblock along the N dimension"},
{ArgumentTypeID::kInteger, {"warps_k", "warp-count::k"}, "Number of warps within threadblock along the K dimension"},
{ArgumentTypeID::kInteger, {"inst_m", "instruction-shape::m"}, "Math instruction shape in the M dimension"},
{ArgumentTypeID::kInteger, {"inst_n", "instruction-shape::n"}, "Math instruction shape in the N dimension"},
{ArgumentTypeID::kInteger, {"inst_k", "instruction-shape::k"}, "Math instruction shape in the K dimension"},
{ArgumentTypeID::kInteger, {"min_cc", "minimum-compute-capability"}, "Minimum device compute capability"},
{ArgumentTypeID::kInteger, {"max_cc", "maximum-compute-capability"}, "Maximum device compute capability"}
};
arguments_.insert(arguments_.end(), tile_description_arguments.begin(), tile_description_arguments.end());
for (auto provider : verification_providers) {
if (std::find(
options.verification.providers.begin(),
options.verification.providers.end(),
provider) != options.verification.providers.end()) {
verification_providers_.push_back(provider);
}
}
}
/// Destructor
OperationProfiler::~OperationProfiler() {}
/// Gets the schema description
std::string const & OperationProfiler::description() const {
return description_;
}
/// Prints usage statement for the math function
void OperationProfiler::print_usage(std::ostream &out) const {
for (auto const & desc : arguments_) {
size_t const kAliasStart = 10;
size_t columns = 0;
std::string type_str = to_string(desc.type);
columns += type_str.size();
out << " [" << type_str << "]";
if (columns < kAliasStart) {
out << std::string(kAliasStart - columns, ' ');
}
columns = 0;
int j = 0;
for (auto const & alias : desc.aliases) {
columns += alias.size() + (j ? 1 : 0) + 2;
out << (j++ ? "," : "") << "--" << alias;
}
size_t const kTotalColumns = 50;
if (columns < kTotalColumns) {
out << std::string(kTotalColumns - columns, ' ');
}
out << desc.description << "\n";
}
}
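// For reference, each argument renders as one usage line roughly of the form
// (column widths are illustrative; they are derived from kAliasStart and kTotalColumns):
//
//   [enum]      --op_class,--opcode-class                         Class of math instruction (simt, tensorop, wmmatensorop, wmma)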
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if the current operation description satisfies the problem space
bool OperationProfiler::satisfies(
library::OperationDescription const &op_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::OpcodeClassID opcode_class;
if (arg_as_OpcodeClassID(opcode_class, "op_class", problem_space, problem)) {
if (opcode_class != op_desc.tile_description.math_instruction.opcode_class) {
return false;
}
}
int64_t int_value;
if (arg_as_int(int_value, "inst_m", problem_space, problem)) {
if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.m()) != int_value) {
return false;
}
}
if (arg_as_int(int_value, "inst_n", problem_space, problem)) {
if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.n()) != int_value) {
return false;
}
}
if (arg_as_int(int_value, "inst_k", problem_space, problem)) {
if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.k()) != int_value) {
return false;
}
}
if (arg_as_int(int_value, "cta_m", problem_space, problem)) {
if (int64_t(op_desc.tile_description.threadblock_shape.m()) != int_value) {
return false;
}
}
if (arg_as_int(int_value, "cta_n", problem_space, problem)) {
if (int64_t(op_desc.tile_description.threadblock_shape.n()) != int_value) {
return false;
}
}
if (arg_as_int(int_value, "cta_k", problem_space, problem)) {
if (int64_t(op_desc.tile_description.threadblock_shape.k()) != int_value) {
return false;
}
}
if (arg_as_int(int_value, "cluster_m", problem_space, problem)) {
if (int64_t(op_desc.tile_description.cluster_shape.m()) != int_value) {
return false;
}
}
if (arg_as_int(int_value, "cluster_n", problem_space, problem)) {
if (int64_t(op_desc.tile_description.cluster_shape.n()) != int_value) {
return false;
}
}
if (arg_as_int(int_value, "cluster_k", problem_space, problem)) {
if (int64_t(op_desc.tile_description.cluster_shape.k()) != int_value) {
return false;
}
}
if (arg_as_int(int_value, "stages", problem_space, problem)) {
if (int64_t(op_desc.tile_description.threadblock_stages) != int_value) {
return false;
}
}
if (arg_as_int(int_value, "warps_m", problem_space, problem)) {
if (int64_t(op_desc.tile_description.warp_count.m()) != int_value) {
return false;
}
}
if (arg_as_int(int_value, "warps_n", problem_space, problem)) {
if (int64_t(op_desc.tile_description.warp_count.n()) != int_value) {
return false;
}
}
if (arg_as_int(int_value, "warps_k", problem_space, problem)) {
if (int64_t(op_desc.tile_description.warp_count.k()) != int_value) {
return false;
}
}
library::NumericTypeID numeric_type;
if (arg_as_NumericTypeID(numeric_type, "accum", problem_space, problem)) {
if (numeric_type != op_desc.tile_description.math_instruction.element_accumulator) {
return false;
}
}
return true;
}
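// Informal example: a command line such as
//
//   cutlass_profiler --operation=Gemm --op_class=tensorop --cta_m=128 --cta_n=128 --cta_k=32 --stages=3
//
// makes satisfies() reject any operation whose tile description does not match
// the requested opcode class, threadblock shape, and stage count; the flag
// names are the argument aliases registered in the constructor above.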
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
std::ostream& operator<<(std::ostream& out, library::Provider provider) {
if (provider == library::Provider::kNone) {
out << "kNone";
}
else if (provider == library::Provider::kCUTLASS) {
out << "kCUTLASS";
}
else if (provider == library::Provider::kReferenceHost) {
out << "kReferenceHost";
}
else if (provider == library::Provider::kReferenceDevice) {
out << "kReferenceDevice";
}
else if (provider == library::Provider::kCUBLAS) {
out << "kCUBLAS";
}
else if (provider == library::Provider::kCUDNN) {
out << "kCUDNN";
}
else {
out << "kInvalid";
}
return out;
}
std::ostream& operator<<(std::ostream& out, library::OperationKind provider) {
if (provider == library::OperationKind::kGemm) {
out << "kGemm";
}
else if (provider == library::OperationKind::kRankK) {
out << "kRankK";
}
else if (provider == library::OperationKind::kRank2K) {
out << "kRank2K";
}
else if (provider == library::OperationKind::kTrmm) {
out << "kTrmm";
}
else if (provider == library::OperationKind::kSymm) {
out << "kSymm";
}
else if (provider == library::OperationKind::kConv2d) {
out << "kConv2d";
}
else if (provider == library::OperationKind::kConv3d) {
out << "kConv3d";
}
else if (provider == library::OperationKind::kEqGemm) {
out << "kEqGemm";
}
else if (provider == library::OperationKind::kSparseGemm) {
out << "kSparseGemm";
}
else if (provider == library::OperationKind::kReduction) {
out << "kReduction";
}
else {
out << "kInvalid";
}
return out;
}
#endif // defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
/// Entry point to profile all operations in the manifest
int OperationProfiler::profile_all(
Options const &options,
library::Manifest const &manifest,
DeviceContext &device_context) {
ProblemSpace problem_space(arguments_, options.cmdline);
// 1. Construct performance report
PerformanceReport report(options, problem_space.argument_names(), kind_);
// 2. For each problem in problem space
ProblemSpace::Iterator problem_it = problem_space.begin();
ProblemSpace::Iterator problem_end = problem_space.end();
bool continue_profiling = true;
int retval = 0;
// For each problem in problem space
for (; continue_profiling && problem_it != problem_end; ++problem_it) {
ProblemSpace::Problem problem = problem_it.at();
report.next_problem();
// For each operation in manifest
int matched_operation_count = 0;
int profiled_operation_count = 0;
for (auto const& operation_ptr : manifest) {
library::Operation const *operation = operation_ptr.get();
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
std::cerr << " Operation: " << typeid(*operation).name() << "\n"
<< " name: " << operation->description().name << "\n"
<< " kind: " << operation->description().kind << "\n"
<< " provider: " << operation->description().provider << "\n";
#endif // CUTLASS_DEBUG_TRACE_LEVEL
auto min_cc = operation->description().tile_description.minimum_compute_capability;
auto max_cc = operation->description().tile_description.maximum_compute_capability;
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
std::cerr << " min_cc: " << min_cc << "\n";
      std::cerr << "    max_cc: " << max_cc << "\n";
#endif
// Clear named allocations
device_context.free();
#if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1)
if (operation->description().kind != kind_) {
std::cerr << " @ kind " << operation->description().kind
<< " != kind_ " << kind_ << "\n";
}
if (operation->description().provider != library::Provider::kCUTLASS) {
std::cerr << " @ provider " << operation->description().provider
<< " != library::Provider::kCUTLASS\n";
}
if (options.device.compute_capability() < min_cc) {
std::cerr << " @ compute_capability "
<< options.device.compute_capability()
<< " < min_cc " << min_cc << "\n";
}
if (options.device.compute_capability() > max_cc) {
std::cerr << " @ compute_capability "
<< options.device.compute_capability()
<< " > max_cc " << max_cc << "\n";
}
#endif
// Execute compatible cutlass operations if they satisfy the current device's compute capability
if (operation->description().kind == kind_ &&
operation->description().provider == library::Provider::kCUTLASS &&
options.device.compute_capability() >= min_cc &&
options.device.compute_capability() <= max_cc) {
std::string operation_name(operation->description().name);
// Filter kernels by name
bool filtered_by_name = options.operation_names.empty();
if (!filtered_by_name) {
for (auto const & op_name : options.operation_names) {
if (find_string_matches_(op_name, operation_name)) {
filtered_by_name = true;
break;
}
}
}
for (auto const & op_name : options.excluded_operation_names) {
if (find_string_matches_(op_name, operation_name)) {
filtered_by_name = false;
break;
}
}
if (!filtered_by_name || !satisfies(operation->description(), problem_space, problem)) {
continue;
}
        // We have found a kernel match, so increment the matched-kernel counter
++matched_operation_count;
// A. Initialize configuration
Status status = this->initialize_configuration(
options,
report,
device_context,
operation,
problem_space,
problem);
if (status == Status::kErrorInternal) {
// If there was an internal error, consume the CUDA error and move to the next operation.
(void)cudaGetLastError();
report.append_result(model_result_);
continue;
}
else if (status != Status::kSuccess) {
// If the workspace could not be initialized for any other reason, continue to
// the next operation.
continue;
}
if (continue_profiling) {
if (options.report.print_kernel_before_running) {
std::cout << "Profiling kernel for JUnit test " << options.report.junit_output_path << ": "
<< operation_name << std::endl;
}
status = this->initialize_workspace(
options,
report,
device_context,
operation,
problem_space,
problem);
if (status == Status::kErrorInternal) {
// If there was an internal error, consume the CUDA error and move to the next operation.
(void)cudaGetLastError();
report.append_results(results_);
continue;
}
else if (status != Status::kSuccess) {
// If the workspace could not be initialized for any other reason, continue to
// the next operation.
continue;
}
}
//
// Profile CUTLASS if it is enabled
//
// B. Verify CUTLASS
if (continue_profiling && options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
continue_profiling = this->verify_cutlass(
options,
report,
device_context,
operation,
problem_space,
problem);
retval |= (not continue_profiling);
}
if (options.execution_mode == ExecutionMode::kDryRun) {
report.append_results(results_);
results_.clear();
continue;
}
//
// C. Optionally save workspace
//
if (options.verification.save_workspace == SaveWorkspace::kAlways) {
save_workspace(
device_context,
options,
operation->description(),
library::Provider::kCUTLASS);
}
//
// D. Profile
//
if (continue_profiling && options.profiling.enabled) {
continue_profiling = this->profile(
options,
report,
device_context,
operation,
problem_space,
problem);
          // Count the op as profiled, even if it failed to profile
profiled_operation_count++;
}
report.append_results(results_);
results_.clear();
} // if op satisfied compute capacity
if (!continue_profiling) {
        // Break out of the `for op in manifest` loop; the enclosing
        // `for each problem in problem space` loop re-checks continue_profiling before moving on.
break;
}
} // for op in manifest
// If we did not find any kernels that match our filters and error_on_no_match was set, report an error
if (options.profiling.error_on_no_match && matched_operation_count <= 0) {
#if !defined(NDEBUG)
std::cerr << "Error: No matching kernels found with kernel selection filters [--error_on_no_match]" << std::endl;
#endif
retval |= 1;
// Stop profiling on error no match
continue_profiling = false;
}
if (options.profiling.error_if_nothing_is_profiled && options.profiling.enabled && profiled_operation_count <= 0) {
#if !defined(NDEBUG)
std::cerr << "Error: No kernels profiled found with kernel selection filters [--error_if_nothing_is_profiled]" << std::endl;
#endif
retval |= 1;
      // Stop profiling when nothing was profiled
continue_profiling = false;
}
} // for each problem in problem space
return retval;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Sleep for a given duration in ms
void OperationProfiler::sleep(int sleep_duration) {
if (sleep_duration) {
#ifdef __unix__
usleep(sleep_duration * 1000);
#elif defined(_WIN32) || defined(WIN32)
SleepEx(sleep_duration, false);
#else
// sleep not supported
#endif
}
}
/// Compares tensors for equality
Disposition OperationProfiler::compare_tensors(
Options const &options,
DeviceAllocation &experimental,
DeviceAllocation &reference,
int64_t count) {
if (experimental.type() != reference.type()) {
return Disposition::kIncorrect;
}
bool passed = false;
if (count == 0) {
count = reference.capacity();
}
if (options.verification.epsilon == 0) {
// bit-level equality
passed = DeviceAllocation::block_compare_equal(
experimental.type(),
experimental.data(),
reference.data(),
count);
}
else {
// relative error function
passed = DeviceAllocation::block_compare_relatively_equal(
experimental.type(),
experimental.data(),
reference.data(),
count,
options.verification.epsilon,
options.verification.nonzero_floor);
}
return passed ? Disposition::kPassed : Disposition::kIncorrect;
}
/// Saves the workspace
void OperationProfiler::save_workspace(
DeviceContext &device_context,
Options const &options,
library::OperationDescription const &desc,
library::Provider provider,
library::Provider verification_provider) {
for (auto const & named_allocation : device_context) {
DeviceAllocation *allocation = named_allocation.second;
std::stringstream filename;
filename << desc.name << "_" << library::to_string(provider) << "_";
if (verification_provider != library::Provider::kInvalid) {
filename << "verified_by_" << library::to_string(verification_provider) << "_";
}
filename << named_allocation.first + ".mat";
std::ofstream out(filename.str());
allocation->write_tensor_csv(out);
out << "\n";
if (options.report.verbose) {
std::cout << "wrote '" << filename.str() << "'" << std::endl;
}
}
}
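// The emitted filename therefore has the form
//   <operation_name>_<provider>_[verified_by_<verifier>_]<allocation_name>.mat
// with one file written per named allocation in the device context.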
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Method to profile a CUTLASS Operation
Status OperationProfiler::profile_cutlass_(
double &runtime,
Options const &options,
library::Operation const *operation,
void *arguments,
void *host_workspace,
void *device_workspace) {
GpuTimer timer;
//
// Optional sleep to limit power consumption and thermals
//
sleep(options.profiling.sleep_duration);
//
// Warmup loop
//
Status status;
for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) {
status = operation->run(
arguments,
host_workspace,
device_workspace);
if (status != Status::kSuccess) {
return status;
}
}
//
// Initialize GPU timer
//
timer.start();
//
// Profiling loop
//
int Iterations = options.profiling.iterations;
int iteration = 0;
for (; iteration < Iterations; ++iteration) {
status = operation->run(
arguments,
host_workspace,
device_workspace);
if (status != Status::kSuccess) {
return status;
}
}
//
// Wait for completion
//
timer.stop_and_wait();
//
// Update performance result
//
runtime = timer.duration(iteration);
return status;
}
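// Note on reporting: the timer starts only after the warmup loop, so warmup
// iterations are excluded from the measurement, and timer.duration(iteration)
// is passed the iteration count -- presumably so the returned runtime is the
// average per iteration rather than the total elapsed time.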
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Sets operation description
void OperationProfiler::initialize_result_(
PerformanceResult &result,
library::OperationDescription const &operation_desc,
ProblemSpace const &problem_space) {
set_argument(result, "op_class", problem_space,
library::to_string(operation_desc.tile_description.math_instruction.opcode_class));
set_argument(result, "accum", problem_space,
library::to_string(operation_desc.tile_description.math_instruction.element_accumulator));
set_argument(result, "cta_m", problem_space, operation_desc.tile_description.threadblock_shape.m());
set_argument(result, "cta_n", problem_space, operation_desc.tile_description.threadblock_shape.n());
set_argument(result, "cta_k", problem_space, operation_desc.tile_description.threadblock_shape.k());
set_argument(result, "cluster_m", problem_space, operation_desc.tile_description.cluster_shape.m());
set_argument(result, "cluster_n", problem_space, operation_desc.tile_description.cluster_shape.n());
set_argument(result, "cluster_k", problem_space, operation_desc.tile_description.cluster_shape.k());
set_argument(result, "stages", problem_space, operation_desc.tile_description.threadblock_stages);
set_argument(result, "warps_m", problem_space, operation_desc.tile_description.warp_count.m());
set_argument(result, "warps_n", problem_space, operation_desc.tile_description.warp_count.n());
set_argument(result, "warps_k", problem_space, operation_desc.tile_description.warp_count.k());
set_argument(result, "inst_m", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.m());
set_argument(result, "inst_n", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.n());
set_argument(result, "inst_k", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.k());
set_argument(result, "min_cc", problem_space, operation_desc.tile_description.minimum_compute_capability);
set_argument(result, "max_cc", problem_space, operation_desc.tile_description.maximum_compute_capability);
}
/// Helper
void OperationProfiler::set_argument(
PerformanceResult &result,
char const *name,
ProblemSpace const &problem_space,
std::string const &value) {
result.arguments.at(problem_space.argument_index(name)) = make_pair(std::string(name), value);
}
void OperationProfiler::set_argument(
PerformanceResult &result,
char const *name,
ProblemSpace const &problem_space,
int64_t value) {
result.arguments.at(problem_space.argument_index(name)) = make_pair(std::string(name), library::lexical_cast(value));
}
/// finds string matches filter_string in operation_name
bool OperationProfiler::find_string_matches_(
std::string const &filter_string,
std::string const &operation_name) {
// Returns true if all substrings appear in the operation_name in order
// Split filter_string of the format "gemm*f32*nt" to tokens ["gemm", "f32", "nt"]
std::string item;
std::istringstream iss(filter_string);
std::vector<std::string> filter_tokens;
while (std::getline(iss, item, '*')) {
filter_tokens.push_back(item);
}
// Search filter_tokens in operation_name in order
size_t start = 0, idx = 0;
for (auto & token : filter_tokens) {
// Check if characters left to be parsed in operation_name
if (start < operation_name.length()) {
// Find token in operation_name[start:]
idx = operation_name.substr(start).find(token);
if (idx == std::string::npos) {
return false;
}
}
start += (idx + token.length());
}
// All tokens in filter_string found in operation_name
return true;
}
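// Worked example: filter_string "gemm*f32*nt" splits into tokens
// ["gemm", "f32", "nt"]. A (hypothetical) operation name such as
// "cutlass_tensorop_gemm_f32_128x128_32x3_nt_align4" matches because "gemm",
// then "f32", then "nt" are found left to right, while the corresponding
// "_tn_" kernel would fail on the final token.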
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/operation_profiler.cu/0 | {
"file_path": "tools/profiler/src/operation_profiler.cu",
"repo_id": "tools",
"token_count": 9672
} | 57 |
/******************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
 * \brief CUDA kernels that perform group normalization on a device memory tensor with NHWC layout. The tensor is viewed as [N, H, W, G, C'] and normalization is performed over the [H, W, C'] elements of each group.
*/
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
#include "device_utils.h"
#include <float.h>
namespace cutlass {
/** \brief Interface to perform group norm on a device memory tensor with NHWC layout.
 * \tparam T: data type
*/
template <typename T>
void groupnorm(cutlass::Tensor4DCoord input_size,
const int num_groups,
const float eps,
TensorRef<T, layout::TensorNHWC> ref_output,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNHWC> ref_gamma,
TensorRef<T, layout::TensorNHWC> ref_beta,
cudaStream_t stream);
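// In the kernels below, each (batch n, group g) pair owns C' = C / num_groups
// channels and is normalized over its [H, W, C'] elements:
//
//   mean_g    = sum(x) / (H * W * C')
//   inv_std_g = rsqrt(var(x) + eps)
//   y         = (x - mean_g) * inv_std_g * gamma_c + beta_c
//
// where gamma and beta are per-channel scale and shift parameters.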
extern __shared__ char groupnorm_shm[];
// For small prod_dim1_to_last_dim/num_groups, to avoid multiple loads from global memory,
// we store the input in the shared memory.
// grid(num_groups, dim0)
// block(BLOCKSIZE)
// BLOCKSIZE * TVecs_PER_THREAD <= prod_dim1_to_last_dim / num_groups
template<typename TVec, typename T, int T_PER_TVec>
__global__ void groupnorm_twopass_store_locally(T* output,
const T* input,
const T* gamma,
const T* beta,
int num_groups,
int prod_dim1_to_last_dim,
int last_dim,
const float eps,
const int TVecs_PER_THREAD)
{
const int bid = blockIdx.y; // index of batch
const int gid = blockIdx.x; // index of group
const int tid = threadIdx.x; // index of thread
const int bdimx = blockDim.x;
const int s_reduce_elements = prod_dim1_to_last_dim / num_groups;
const int v_reduce_elements = s_reduce_elements / T_PER_TVec;
const int s_group_stride = last_dim / num_groups;
const int v_group_stride = s_group_stride / T_PER_TVec;
const int offset_of_group = (bid * prod_dim1_to_last_dim + gid * s_group_stride) / T_PER_TVec;
const TVec* input_TVec_ptr = (const TVec*)(input) + offset_of_group;
TVec* output_TVec_ptr = (TVec*)(output) + offset_of_group;
T* local_val = ((T*)groupnorm_shm) + TVecs_PER_THREAD * T_PER_TVec * tid;
float local_sum[1] = {0.0f};
// load from global memory into shared memory
#pragma unroll
for (int i = 0; i < TVecs_PER_THREAD; i += 1) {
const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec;
const int offset_in_group =
((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride))
/ T_PER_TVec;
if (current_load_start_idx < s_reduce_elements) {
TVec tmp_vec = input_TVec_ptr[offset_in_group];
T* tmp_vec_ptr = (T*)(&tmp_vec);
const int local_val_offset = i * T_PER_TVec;
#pragma unroll
for (int j = 0; j < T_PER_TVec; j++) {
float tmp = static_cast<float>(tmp_vec_ptr[j]);
local_sum[0] += tmp;
local_val[local_val_offset + j] = tmp_vec_ptr[j];
}
}
}
__shared__ float s_mean, s_variance;
// reduction for mean
if (bdimx <= 32) {
warpReduceSum<float, 1>(local_sum);
}
else {
blockReduceSum<float, 1>(local_sum);
}
if (tid == 0) {
s_mean = local_sum[0] / s_reduce_elements;
}
__syncthreads();
// reduction for std
local_sum[0] = 0.0f;
#pragma unroll
for (int i = 0; i < TVecs_PER_THREAD; i += 1) {
const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec;
if (current_load_start_idx < s_reduce_elements) {
const int local_val_offset = i * T_PER_TVec;
#pragma unroll
for (int j = 0; j < T_PER_TVec; j++) {
float tmp = static_cast<float>(local_val[local_val_offset + j]);
tmp -= s_mean;
local_sum[0] += tmp * tmp;
}
}
}
if (bdimx <= 32) {
warpReduceSum<float, 1>(local_sum);
}
else {
blockReduceSum<float, 1>(local_sum);
}
if (tid == 0) {
s_variance = rsqrtf(local_sum[0] / s_reduce_elements + eps);
}
__syncthreads();
// normalize
const int gamma_offset_of_group = gid * v_group_stride;
const TVec* gamma_TVec_ptr = (const TVec*)gamma + gamma_offset_of_group;
const TVec* beta_TVec_ptr = (const TVec*)beta + gamma_offset_of_group;
#pragma unroll
for (int i = 0; i < TVecs_PER_THREAD; i += 1) {
const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec;
const int offset_in_group =
((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride))
/ T_PER_TVec;
const int gamma_offset_in_group = (current_load_start_idx % s_group_stride) / T_PER_TVec;
const int local_val_offset = i * T_PER_TVec;
if (current_load_start_idx < s_reduce_elements) {
TVec gamma_val = gamma_TVec_ptr[gamma_offset_in_group];
TVec beta_val = beta_TVec_ptr[gamma_offset_in_group];
T* gamma_val_ptr = (T*)(&gamma_val);
T* beta_val_ptr = (T*)(&beta_val);
TVec tmp_vec;
T* tmp_vec_ptr = (T*)(&tmp_vec);
#pragma unroll
for (int j = 0; j < T_PER_TVec; j++) {
float tmp = (static_cast<float>(local_val[local_val_offset + j]) - s_mean) * s_variance
* static_cast<float>(gamma_val_ptr[j])
+ static_cast<float>(beta_val_ptr[j]);
if (sizeof(T) == sizeof(half)) {
tmp_vec_ptr[j] = T(__float2half_rn(tmp));
}
else {
tmp_vec_ptr[j] = T(tmp);
}
}
output_TVec_ptr[offset_in_group] = tmp_vec;
}
}
}
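// Launch note: this variant keeps each thread's elements in dynamic shared
// memory (groupnorm_shm), so it requires at least
// blockDim.x * TVecs_PER_THREAD * T_PER_TVec * sizeof(T) bytes of dynamic
// shared memory; the host-side dispatch further below is expected to size the
// <<<grid, block, smem>>> launch accordingly.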
// For large prod_dim1_to_last_dim/num_groups,
// in which the data cannot be stored locally,
// we will load from global memory multiple times,
// grid(num_groups, dim0)
// block(BLOCKSIZE)
// BLOCKSIZE * TVecs_PER_THREAD <= prod_dim1_to_last_dim / num_groups
template<typename TVec, typename T, int T_PER_TVec>
__global__ void groupnorm_twopass_multiple_load(T* output,
const T* input,
const T* gamma,
const T* beta,
int num_groups,
int prod_dim1_to_last_dim,
int last_dim,
const float eps,
const int TVecs_PER_THREAD)
{
const int bid = blockIdx.y; // index of batch
const int gid = blockIdx.x; // index of group
const int tid = threadIdx.x; // index of thread
const int bdimx = blockDim.x;
const int s_reduce_elements = prod_dim1_to_last_dim / num_groups;
const int v_reduce_elements = s_reduce_elements / T_PER_TVec;
const int s_group_stride = last_dim / num_groups;
const int v_group_stride = s_group_stride / T_PER_TVec;
const int offset_of_group = (bid * prod_dim1_to_last_dim + gid * s_group_stride) / T_PER_TVec;
const TVec* input_TVec_ptr = (const TVec*)(input) + offset_of_group;
TVec* output_TVec_ptr = (TVec*)(output) + offset_of_group;
float local_sum[1] = {0.0f};
#pragma unroll
for (int i = 0; i < TVecs_PER_THREAD; i += 1) {
const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec;
if (current_load_start_idx < s_reduce_elements) {
const int offset_in_group =
((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride))
/ T_PER_TVec;
TVec tmp_vec = input_TVec_ptr[offset_in_group];
T* tmp_vec_ptr = (T*)(&tmp_vec);
#pragma unroll
for (int j = 0; j < T_PER_TVec; j++) {
float tmp = static_cast<float>(tmp_vec_ptr[j]);
local_sum[0] += tmp;
}
}
}
__shared__ float s_mean, s_variance;
// reduction for mean
if (bdimx <= 32) {
warpReduceSum<float, 1>(local_sum);
}
else {
blockReduceSum<float, 1>(local_sum);
}
if (tid == 0) {
s_mean = local_sum[0] / s_reduce_elements;
}
__syncthreads();
// reduction for std
local_sum[0] = 0.0f;
#pragma unroll
for (int i = 0; i < TVecs_PER_THREAD; i += 1) {
const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec;
if (current_load_start_idx < s_reduce_elements) {
const int offset_in_group =
((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride))
/ T_PER_TVec;
TVec tmp_vec = input_TVec_ptr[offset_in_group];
T* tmp_vec_ptr = (T*)(&tmp_vec);
#pragma unroll
for (int j = 0; j < T_PER_TVec; j++) {
float tmp = static_cast<float>(tmp_vec_ptr[j]);
tmp -= s_mean;
local_sum[0] += tmp * tmp;
}
}
}
if (bdimx <= 32) {
warpReduceSum<float, 1>(local_sum);
}
else {
blockReduceSum<float, 1>(local_sum);
}
if (tid == 0) {
s_variance = rsqrtf(local_sum[0] / s_reduce_elements + eps);
}
__syncthreads();
// normalize
const int gamma_offset_of_group = gid * v_group_stride;
const TVec* gamma_TVec_ptr = (const TVec*)gamma + gamma_offset_of_group;
const TVec* beta_TVec_ptr = (const TVec*)beta + gamma_offset_of_group;
#pragma unroll
for (int i = 0; i < TVecs_PER_THREAD; i += 1) {
const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec;
if (current_load_start_idx < s_reduce_elements) {
const int offset_in_group =
((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride))
/ T_PER_TVec;
const int gamma_offset_in_group = (current_load_start_idx % s_group_stride) / T_PER_TVec;
TVec gamma_val = gamma_TVec_ptr[gamma_offset_in_group];
TVec beta_val = beta_TVec_ptr[gamma_offset_in_group];
T* gamma_val_ptr = (T*)(&gamma_val);
T* beta_val_ptr = (T*)(&beta_val);
TVec tmp_vec = input_TVec_ptr[offset_in_group];
T* tmp_vec_ptr = (T*)(&tmp_vec);
TVec output_tmp_vec;
T* output_tmp_vec_ptr = (T*)(&output_tmp_vec);
#pragma unroll
for (int j = 0; j < T_PER_TVec; j++) {
float tmp =
(static_cast<float>(tmp_vec_ptr[j]) - s_mean) * s_variance * static_cast<float>(gamma_val_ptr[j])
+ static_cast<float>(beta_val_ptr[j]);
if (sizeof(T) == sizeof(half)) {
output_tmp_vec_ptr[j] = T(__float2half_rn(tmp));
}
else {
output_tmp_vec_ptr[j] = T(tmp);
}
}
output_TVec_ptr[offset_in_group] = output_tmp_vec;
}
}
}
//ref_input & ref_output should be [N, H, W, C]
//ref_gamma & ref_beta should be [1, 1, 1, C]
template <typename T>
void groupnorm(cutlass::Tensor4DCoord input_size,
const int num_groups,
const float eps,
TensorRef<T, layout::TensorNHWC> ref_output,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNHWC> ref_gamma,
TensorRef<T, layout::TensorNHWC> ref_beta,
cudaStream_t stream){
const int N = input_size.n();
const int H = input_size.h();
const int W = input_size.w();
const int C = input_size.c();
  if (C % num_groups != 0) {
    printf("[ERROR] C should be a multiple of num_groups.\n");
    return;
  }
T* output = ref_output.data();
const T* input = ref_input.data();
const T* gamma = ref_gamma.data();
const T* beta = ref_beta.data();
const int dim0 = N;
const int last_dim = C;
const int prod_dim1_to_last_dim = H*W*C;
const int s_reduce_elements = prod_dim1_to_last_dim / num_groups;
const int s_group_stride = last_dim / num_groups;
dim3 grid(num_groups, dim0);
int threadblock_size = 32;
if (s_group_stride % 2 == 0) {
const int T_PER_TVec = 2;
while (threadblock_size < 1024) {
if (s_reduce_elements / T_PER_TVec / threadblock_size <= 8)
break;
threadblock_size *= 2;
}
dim3 block(threadblock_size);
const int TVec_PER_THREAD = (s_reduce_elements / T_PER_TVec + threadblock_size - 1) / threadblock_size;
const int shm_size = T_PER_TVec * TVec_PER_THREAD * threadblock_size * sizeof(T);
    // For small s_reduce_elements (e.g. the specific case H=W=22, C=1280, num_groups=32),
    // other grid/block shapes may perform better; tune them for the target problem sizes.
    // Ensure the requested shared memory stays below 48KB.
if (std::is_same<T, float>::value){
if (shm_size < 48 * 1024) {
groupnorm_twopass_store_locally<float2, T, T_PER_TVec><<<grid, block, shm_size, stream>>>(
output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD);
}
else {
groupnorm_twopass_multiple_load<float2, T, T_PER_TVec><<<grid, block, 0, stream>>>(
output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD);
}
}
else{
if (shm_size < 48 * 1024) {
groupnorm_twopass_store_locally<half2, T, T_PER_TVec><<<grid, block, shm_size, stream>>>(
output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD);
}
else {
groupnorm_twopass_multiple_load<half2, T, T_PER_TVec><<<grid, block, 0, stream>>>(
output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD);
}
}
}
else {
const int T_PER_TVec = 1;
while (threadblock_size < 1024) {
if (s_reduce_elements / T_PER_TVec / threadblock_size <= 8)
break;
threadblock_size *= 2;
}
dim3 block(threadblock_size);
const int TVec_PER_THREAD = (s_reduce_elements / T_PER_TVec + threadblock_size - 1) / threadblock_size;
const int shm_size = T_PER_TVec * TVec_PER_THREAD * threadblock_size * sizeof(T);
if (shm_size < 48 * 1024) {
groupnorm_twopass_store_locally<T, T, T_PER_TVec><<<grid, block, shm_size, stream>>>(
output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD);
}
else {
groupnorm_twopass_multiple_load<T, T, T_PER_TVec><<<grid, block, 0, stream>>>(
output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD);
}
}
}
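// The following is an illustrative usage sketch (not part of the library); it assumes the
// cutlass::HostTensor utility from "cutlass/util/host_tensor.h" and a problem with
// N=1, H=W=32, C=256, num_groups=32 so that C is a multiple of num_groups:
//
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::TensorNHWC> x({1, 32, 32, 256});
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::TensorNHWC> y({1, 32, 32, 256});
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::TensorNHWC> gamma({1, 1, 1, 256});
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::TensorNHWC> beta({1, 1, 1, 256});
//   // ... initialize x, gamma, beta on the host and call sync_device() on each ...
//   cutlass::groupnorm({1, 32, 32, 256}, /*num_groups=*/32, /*eps=*/1e-5f,
//                      y.device_ref(), x.device_ref(),
//                      gamma.device_ref(), beta.device_ref(), /*stream=*/nullptr);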
} //namespace cutlass
| tools/util/include/cutlass/util/device_groupnorm.h/0 | {
"file_path": "tools/util/include/cutlass/util/device_groupnorm.h",
"repo_id": "tools",
"token_count": 8838
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines device-side elementwise operations on TensorView. Note, the operations defined
in this header are not specialized for any particular data layout and are therefore not
intended to offer the best possible performance. Rather, they are intended to be generic
reference implementations to support the CUTLASS unit tests.
*/
#pragma once
#if !defined(__CUDACC_RTC__)
// Standard Library includes
#include <utility>
#include <cstdlib>
#include <cmath>
#include <type_traits>
#include <cstdint>
#endif
// CUDA includes
#include <curand_kernel.h>
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/tensor_view.h"
#include "cutlass/blas3.h"
#include "cutlass/layout/vector.h"
#include "cutlass/util/reference/device/tensor_foreach.h"
#include "cutlass/util/distribution.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reference {
namespace device {
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename FloatType>
CUTLASS_DEVICE
FloatType random_normal_float(curandState_t *state) {
return curand_normal(state);
}
template <>
CUTLASS_DEVICE
double random_normal_float<double>(curandState_t *state) {
return curand_normal_double(state);
}
template <typename FloatType>
CUTLASS_DEVICE
FloatType random_uniform_float(curandState_t *state) {
return curand_uniform(state);
}
template <>
CUTLASS_DEVICE
double random_uniform_float<double>(curandState_t *state) {
return curand_uniform_double(state);
}
template <typename Element>
struct RandomGaussianFunc {
using FloatType = typename std::conditional<(sizeof(Element) > 4), double, float>::type;
using IntType = typename std::conditional<(sizeof(Element) > 4), int64_t, int>::type;
/// Parameters structure
struct Params {
//
// Data members
//
uint64_t seed;
FloatType mean;
FloatType stddev;
int int_scale;
FloatType float_scale_up;
FloatType float_scale_down;
//
// Methods
//
/// Construction of Gaussian RNG functor.
Params(
uint64_t seed_ = 0,
Element mean_ = 0,
Element stddev_ = 1,
int int_scale_ = -1
):
seed(seed_),
mean(static_cast<FloatType>(mean_)),
stddev(static_cast<FloatType>(stddev_)),
int_scale(int_scale_) {
float_scale_up = FloatType(IntType(2) << int_scale); // scale up to clamp low order bits
float_scale_down = FloatType(1) / FloatType(IntType(2) << int_scale);
}
};
//
// Data members
//
/// Parameters object
Params params;
/// RNG state object
curandState_t rng_state;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
RandomGaussianFunc(Params const ¶ms): params(params) {
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(params.seed, gtid, 0, &rng_state);
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
Element operator()() {
FloatType rnd = random_normal_float<FloatType>(&rng_state);
rnd = params.mean + params.stddev * rnd;
Element result;
if (params.int_scale >= 0) {
rnd = FloatType(IntType(std::llround(rnd * params.float_scale_up)));
result = Element(IntType(rnd * params.float_scale_down));
}
else {
result = Element(rnd);
}
return result;
}
};
template <typename Real>
struct RandomGaussianFunc<complex<Real>> {
using Element = complex<Real>;
using FloatType = typename std::conditional<(sizeof(Real) > 4), double, float>::type;
using IntType = typename std::conditional<(sizeof(Real) > 4), int64_t, int>::type;
/// Parameters structure
struct Params {
//
// Data members
//
uint64_t seed;
FloatType mean;
FloatType stddev;
int int_scale;
FloatType float_scale_up;
FloatType float_scale_down;
//
// Methods
//
/// Construction of Gaussian RNG functor.
Params(
uint64_t seed_ = 0,
Real mean_ = 0,
Real stddev_ = 1,
int int_scale_ = -1
):
seed(seed_),
mean(static_cast<FloatType>(mean_)),
stddev(static_cast<FloatType>(stddev_)),
int_scale(int_scale_) {
float_scale_up = FloatType(IntType(1) << int_scale);
float_scale_up += FloatType(0.5) * float_scale_up;
float_scale_down = FloatType(1) / FloatType(IntType(1) << int_scale);
}
};
//
// Data members
//
/// Parameters object
Params params;
/// RNG state object
curandState_t rng_state;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
RandomGaussianFunc(Params const ¶ms): params(params) {
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(params.seed, gtid, 0, &rng_state);
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
Element operator()() {
FloatType rnd_r = random_normal_float<FloatType>(&rng_state);
FloatType rnd_i = random_normal_float<FloatType>(&rng_state);
rnd_r = params.mean + params.stddev * rnd_r;
rnd_i = params.mean + params.stddev * rnd_i;
Element result;
if (params.int_scale >= 0) {
rnd_r = FloatType(IntType(rnd_r * params.float_scale_up));
      rnd_i = FloatType(IntType(rnd_i * params.float_scale_up));
result = {
Real(rnd_r * params.float_scale_down),
Real(rnd_i * params.float_scale_down)
};
}
else {
result = Element(Real(rnd_r), Real(rnd_i));
}
return result;
}
};
/// Computes a random Gaussian distribution
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillRandomGaussianFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
using RandomFunc = RandomGaussianFunc<Element>;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
typename RandomFunc::Params random;
//
// Methods
//
/// Construction of Gaussian RNG functor.
Params(
TensorView view_ = TensorView(),
typename RandomFunc::Params random_ = typename RandomFunc::Params()
):
view(view_), random(random_) {
}
};
//
// Data members
//
Params params;
RandomFunc random;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
TensorFillRandomGaussianFunc(Params const ¶ms): params(params), random(params.random) {
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
params.view.at(coord) = random();
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a Gaussian distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomGaussian(
TensorView<Element, Layout> view, ///< destination tensor
uint64_t seed, ///< seed for RNG
typename RealType<Element>::Type mean = Element(0), ///< Gaussian distribution's mean
typename RealType<Element>::Type stddev = Element(1), ///< Gaussian distribution's standard deviation
int bits = -1, ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
cudaStream_t stream = nullptr) {
using RandomFunc = detail::RandomGaussianFunc<Element>;
using Func = detail::TensorFillRandomGaussianFunc<Element, Layout>;
using Params = typename Func::Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, typename RandomFunc::Params(seed, mean, stddev, bits)),
/*grid_size*/0, /*block_size*/0,
stream
);
}
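/// Illustrative usage sketch (assumes a cutlass::HostTensor from "cutlass/util/host_tensor.h"):
///
/// \code
///   cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor({128, 128});
///   cutlass::reference::device::TensorFillRandomGaussian(
///     tensor.device_view(), /*seed=*/2024, /*mean=*/0.0f, /*stddev=*/1.0f);
///   tensor.sync_host();   // copy results back if host-side access is needed
/// \endcode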
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a Gaussian distribution.
template <typename Element> ///< Element type
void BlockFillRandomGaussian(
Element *ptr,
size_t capacity,
uint64_t seed, ///< seed for RNG
typename RealType<Element>::Type mean, ///< Gaussian distribution's mean
typename RealType<Element>::Type stddev, ///< Gaussian distribution's standard deviation
int bits = -1, ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
cudaStream_t stream = nullptr) {
using RandomFunc = detail::RandomGaussianFunc<Element>;
typename RandomFunc::Params params(seed, mean, stddev, bits);
BlockForEach<Element, RandomFunc>(ptr, capacity, params, /*grid_size*/0, /*block_size*/0, stream);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Computes a random uniform distribution
template <typename Element> ///< Element type
struct RandomUniformFunc {
using FloatType = typename std::conditional<
(sizeof(Element) > 4),
double,
float>::type;
using IntType = typename std::conditional<
(sizeof(Element) > 4),
int64_t,
int>::type;
/// Parameters structure
struct Params {
//
// Data members
//
uint64_t seed;
FloatType range;
FloatType max;
int int_scale;
FloatType float_scale_up;
FloatType float_scale_down;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
    /// Construction of uniform RNG functor.
Params(
uint64_t seed_ = 0,
Element max_ = 1,
Element min = 0,
int int_scale_ = -1
):
seed(seed_),
range(static_cast<FloatType>(max_) - static_cast<FloatType>(min)),
max(static_cast<FloatType>(max_)),
int_scale(int_scale_) {
float_scale_up = FloatType(IntType(2) << int_scale); // scale up to clamp low order bits
float_scale_down = FloatType(1) / FloatType(IntType(2) << int_scale);
}
};
//
// Data members
//
/// Parameters object
Params params;
/// RNG state object
curandState_t rng_state;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
RandomUniformFunc(Params const ¶ms): params(params) {
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(params.seed, gtid, 0, &rng_state);
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
Element operator()() {
FloatType rnd = random_uniform_float<FloatType>(&rng_state);
rnd = params.max - params.range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
Element result;
if (params.int_scale >= 0) {
rnd = FloatType(IntType(std::llround(rnd * params.float_scale_up)));
result = Element(IntType(rnd * params.float_scale_down));
}
else {
result = Element(rnd);
}
return result;
}
};
/// Computes a random uniform distribution
template <typename Real>
struct RandomUniformFunc<complex<Real>> {
using Element = complex<Real>;
using FloatType = typename std::conditional<
(sizeof(Real) > 4),
double,
float>::type;
using IntType = typename std::conditional<
(sizeof(Real) > 4),
int64_t,
int>::type;
/// Parameters structure
struct Params {
//
// Data members
//
uint64_t seed;
FloatType range;
FloatType min;
int int_scale;
FloatType float_scale_up;
FloatType float_scale_down;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
    /// Construction of uniform RNG functor.
Params(
uint64_t seed_ = 0,
FloatType max = 1,
FloatType min_ = 0,
int int_scale_ = -1
):
seed(seed_),
range(static_cast<FloatType>(max - min_)),
min(static_cast<FloatType>(min_)),
int_scale(int_scale_) {
float_scale_up = FloatType(IntType(1) << int_scale);
float_scale_up += FloatType(0.5) * float_scale_up;
float_scale_down = FloatType(1) / FloatType(IntType(1) << int_scale);
}
};
//
// Data members
//
/// Parameters object
Params params;
/// RNG state object
curandState_t rng_state;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
RandomUniformFunc(Params const ¶ms): params(params) {
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(params.seed, gtid, 0, &rng_state);
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
Element operator()() {
FloatType rnd_r = random_uniform_float<FloatType>(&rng_state);
FloatType rnd_i = random_uniform_float<FloatType>(&rng_state);
rnd_r = params.min + params.range * rnd_r;
rnd_i = params.min + params.range * rnd_i;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
Element result;
if (params.int_scale >= 0) {
rnd_r = FloatType(IntType(rnd_r * params.float_scale_up));
rnd_i = FloatType(IntType(rnd_i * params.float_scale_up));
result = {
Real(rnd_r * params.float_scale_down),
Real(rnd_i * params.float_scale_down)
};
}
else {
result = Element(Real(rnd_r), Real(rnd_i));
}
return result;
}
};
/// Fills a tensor with random values drawn from a uniform distribution
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillRandomUniformFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
using RandomFunc = RandomUniformFunc<Element>;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
typename RandomFunc::Params random;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
    /// Construction of uniform RNG functor.
    Params(
      TensorView view_ = TensorView(),
      typename RandomFunc::Params random_ = typename RandomFunc::Params()
):
view(view_), random(random_) {
}
};
//
// Data members
//
Params params;
RandomFunc random;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
TensorFillRandomUniformFunc(Params const ¶ms): params(params), random(params.random) {
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
params.view.at(coord) = random();
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomUniform(
TensorView<Element, Layout> view, ///< destination tensor
uint64_t seed, ///< seed for RNG
typename RealType<Element>::Type max = Element(1), ///< upper bound of distribution
typename RealType<Element>::Type min = Element(0), ///< lower bound for distribution
int bits = -1, ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
cudaStream_t stream = nullptr) {
using RandomFunc = detail::RandomUniformFunc<Element>;
using Func = detail::TensorFillRandomUniformFunc<Element, Layout>;
using Params = typename Func::Params;
typename RandomFunc::Params random(seed, max, min, bits);
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, random),
/*grid_size*/0, /*block_size*/0,
stream
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a uniform random distribution.
template <typename Element>
void BlockFillRandomUniform(
Element *ptr,
size_t capacity,
uint64_t seed, ///< seed for RNG
typename RealType<Element>::Type max, ///< upper bound of distribution
typename RealType<Element>::Type min, ///< lower bound for distribution
int bits = -1, ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
cudaStream_t stream = nullptr) {
using RandomFunc = detail::RandomUniformFunc<Element>;
typename RandomFunc::Params params(seed, max, min, bits);
BlockForEach<Element, RandomFunc>(ptr, capacity, params, /*grid_size*/0, /*block_size*/0, stream);
}
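/// Illustrative usage sketch (assumes cutlass::DeviceAllocation from "cutlass/util/device_memory.h"):
///
/// \code
///   cutlass::DeviceAllocation<cutlass::half_t> block(1 << 20);
///   cutlass::reference::device::BlockFillRandomUniform(
///     block.get(), block.size(), /*seed=*/1234,
///     /*max=*/cutlass::half_t(1.0f), /*min=*/cutlass::half_t(-1.0f));
/// \endcode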
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Computes random sparse metadata
template <typename Element> ///< Element type
struct RandomSparseMetaFunc {
using FloatType = float;
using IntType = int32_t;
/// Parameters structure
struct Params {
//
// Data members
//
uint64_t seed;
FloatType range;
int MetaSizeInBits;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
    /// Construction of sparse-metadata RNG functor.
Params(
uint64_t seed_ = 0,
int MetaSizeInBits_ = 2
):
seed(seed_),
MetaSizeInBits(MetaSizeInBits_) {
if (MetaSizeInBits_ == 2) {
range = 6;
}
else if (MetaSizeInBits_ == 4) {
range = 2;
}
else {
throw std::invalid_argument("Invalid MetaSizeInBits");
}
}
};
//
// Data members
//
/// Parameters object
Params params;
/// RNG state object
curandState_t rng_state;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
RandomSparseMetaFunc(Params const ¶ms): params(params) {
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(params.seed, gtid, 0, &rng_state);
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
Element operator()() {
Element FourToTwoMeta[6] = {0x4, 0x8, 0x9, 0xc, 0xd, 0xe};
Element TwoToOneMeta[2] = {0x4, 0xe};
Element *MetaArray =
(params.MetaSizeInBits == 2) ? FourToTwoMeta : TwoToOneMeta;
Element result = 0x0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < cutlass::sizeof_bits<Element>::value / 4; ++i) {
FloatType rnd = random_uniform_float<FloatType>(&rng_state);
rnd = params.range * rnd;
Element meta = MetaArray[(int)rnd];
result = (Element)(result | ((Element)(meta << (i * 4))));
}
return result;
}
};
/// Fills a tensor with random sparse metadata
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillRandomSparseMetaFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
using RandomFunc = RandomSparseMetaFunc<Element>;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
typename RandomFunc::Params random;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
    /// Construction of sparse-metadata RNG functor.
    Params(
      TensorView view_ = TensorView(),
      typename RandomFunc::Params random_ = typename RandomFunc::Params()
):
view(view_), random(random_) {
}
};
//
// Data members
//
Params params;
RandomFunc random;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
TensorFillRandomSparseMetaFunc(Params const ¶ms): params(params), random(params.random) {
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
params.view.at(coord) = random();
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random sparse meta data.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomSparseMeta(
TensorView<Element, Layout> view, ///< destination tensor
uint64_t seed, ///< seed for RNG
int MetaSizeInBits = 2, ///< meta data size
cudaStream_t stream = nullptr) {
using RandomFunc = detail::RandomSparseMetaFunc<Element>;
using Func = detail::TensorFillRandomUniformFunc<Element, Layout>;
using Params = typename Func::Params;
typename RandomFunc::Params random(seed, MetaSizeInBits);
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, random),
/*grid_size*/0, /*block_size*/0,
stream
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of memory with random sparse meta data.
template <typename Element>
void BlockFillRandomSparseMeta(
Element *ptr,
size_t capacity,
uint64_t seed, ///< seed for RNG
int MetaSizeInBits = 2, ///< meta data size
cudaStream_t stream = nullptr) {
using RandomFunc = detail::RandomSparseMetaFunc<Element>;
typename RandomFunc::Params params(seed, MetaSizeInBits);
BlockForEach<Element, RandomFunc>(ptr, capacity, params, /*grid_size*/0, /*block_size*/0, stream);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Functor to fill a tensor with zeros off the diagonal and a uniform value on the diagonal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillDiagonalFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element diag;
Element other;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
Params(
TensorView view_ = TensorView(),
Element diag_ = Element(1),
Element other_ = Element(0)
):
view(view_), diag(diag_), other(other_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
  /// Device-side construction of the functor
CUTLASS_DEVICE
TensorFillDiagonalFunc(Params const ¶ms): params(params) {
}
/// Updates the tensor
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool is_diag = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[i - 1]) {
is_diag = false;
break;
}
}
params.view.at(coord) = (is_diag ? params.diag : params.other);
}
};
// Overwrites the elements of a tensor with a uniform value depending on fill mode
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillPartialFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element element;
FillMode fill_mode;
/// Default ctor
CUTLASS_HOST_DEVICE
Params(): fill_mode(FillMode::kNone) { }
//
// Methods
//
    /// Construction of the partial-fill functor parameters.
Params(
TensorView view_,
Element element_,
FillMode fill_mode_
):
view(view_), element(element_), fill_mode(fill_mode_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
CUTLASS_DEVICE
TensorFillPartialFunc(Params const ¶ms): params(params) {
}
/// Overwrites the element if it is within the covered region.
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool predicate = true;
switch (params.fill_mode) {
case FillMode::kFull:
predicate = true;
break;
case FillMode::kLower:
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i - 1] < coord[i]) {
predicate = false;
break;
}
}
break;
case FillMode::kUpper:
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i - 1] > coord[i]) {
predicate = false;
break;
}
}
break;
case FillMode::kDiagonal:
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i - 1] != coord[i]) {
predicate = false;
break;
}
}
break;
case FillMode::kNone: // fall-through
default:
predicate = false;
break;
}
if (predicate) {
params.view.at(coord) = params.element;
}
}
};
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorClearPartialFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
///
static_assert((Layout::kRank == 2), "TensorClearPartial is only supported for matrices");
/// Parameters structure
struct Params {
TensorView view{};
Element element{};
FillMode fill_mode{FillMode::kNone};
int alignment{0};
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
CUTLASS_DEVICE
TensorClearPartialFunc(Params const ¶ms): params(params) {
}
/// Overwrites the element if it is within the covered region.
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool predicate = true;
switch (params.fill_mode) {
case FillMode::kLower:
if ((coord[0] >= coord[1]) ||
((coord[1] - coord[0]) >= params.alignment)) {
predicate = false;
break;
}
break;
case FillMode::kUpper:
if ((coord[0] <= coord[1]) ||
((coord[0] - coord[1]) >= params.alignment)) {
predicate = false;
break;
}
break;
case FillMode::kNone: // fall-through
default:
predicate = false;
break;
}
if (predicate) {
params.view.at(coord) = params.element;
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with one value on the diagonal and another value everywhere else.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillDiagonal(
TensorView<Element, Layout> view, ///< destination tensor
Element diag = Element(1), ///< value to write in the diagonal
Element other = Element(0), ///< value to write off the diagonal
cudaStream_t stream = nullptr) {
typedef detail::TensorFillDiagonalFunc<Element, Layout> Func;
typedef typename Func::Params Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, diag, other),
/*grid_size*/0, /*block_size*/0,
stream
);
}
/// Fills a tensor partially depending on fill mode. Elements not covered by the fillmode are
/// not written.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillPartial(
TensorView<Element, Layout> view, ///< destination tensor
Element element,
FillMode fill_mode,
cudaStream_t stream = nullptr) {
typedef detail::TensorFillPartialFunc<Element, Layout> Func;
typedef typename Func::Params Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, element, fill_mode),
    /*grid_size*/0, /*block_size*/0,
    stream
);
}
/// Clears a tensor partially depending on fill mode and alignment. Elements on the wrong side
/// of the fill mode (up to the alignment) are overwritten with the user-supplied element (typically zeros).
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorClearPartial(
TensorView<Element, Layout> view, ///< destination tensor
Element element,
FillMode fill_mode,
int alignment,
cudaStream_t stream = nullptr) {
typedef detail::TensorClearPartialFunc<Element, Layout> Func;
typedef typename Func::Params Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params{view, element, fill_mode, alignment},
/*grid_size*/0, /*block_size*/0,
stream
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with a uniform value
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFill(
TensorView<Element, Layout> view, ///< destination tensor
Element val = Element(0), ///< value to uniformly fill it with
cudaStream_t stream = nullptr) {
TensorFillDiagonal(view, val, val, stream);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor's diagonal with 1 and 0 everywhere else.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillIdentity(
TensorView<Element, Layout> view, ///< destination tensor
cudaStream_t stream = nullptr) {
TensorFillDiagonal(view, Element(1), Element(0), stream);
}
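/// Illustrative usage sketch ('tensor' and 'identity' are assumed to be HostTensor<float, ...> objects):
///
/// \code
///   cutlass::reference::device::TensorFill(tensor.device_view(), 0.5f);       // constant fill
///   cutlass::reference::device::TensorFillIdentity(identity.device_view());   // 1 on the diagonal, 0 elsewhere
/// \endcode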
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Writes a uniform value to the diagonal of a tensor
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorUpdateDiagonalFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element diag;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
    /// Construction of the functor.
Params(
TensorView view_ = TensorView(),
Element diag_ = Element(1)
):
view(view_), diag(diag_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
  /// Device-side construction of the functor
CUTLASS_DEVICE
TensorUpdateDiagonalFunc(Params const ¶ms): params(params) {
}
  /// Writes the diagonal value if the coordinate lies on the diagonal
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool is_diag = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[i - 1]) {
is_diag = false;
break;
}
}
if (is_diag) {
params.view.at(coord) = params.diag;
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a uniform value to the diagonal of a tensor without modifying off-diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorUpdateDiagonal(
TensorView<Element, Layout> view, ///< destination tensor
Element diag = Element(1),
cudaStream_t stream = nullptr) {
typedef detail::TensorUpdateDiagonalFunc<Element, Layout> Func;
typedef typename Func::Params Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, diag),
/*grid_size*/0, /*block_size*/0,
stream
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Writes a uniform value to the off-diagonal elements of a tensor
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorUpdateOffDiagonalFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element other;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
    /// Construction of the functor.
Params(
TensorView view_ = TensorView(),
Element other_ = Element(0)
):
view(view_), other(other_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
  /// Device-side construction of the functor
CUTLASS_DEVICE
TensorUpdateOffDiagonalFunc(Params const ¶ms): params(params) {
}
  /// Writes the off-diagonal value if the coordinate lies off the diagonal
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool is_diag = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[i - 1]) {
is_diag = false;
break;
}
}
if (!is_diag) {
params.view.at(coord) = params.other;
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a uniform value to all elements in the tensor without modifying diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorUpdateOffDiagonal(
TensorView<Element, Layout> view, ///< destination tensor
Element other = Element(1),
cudaStream_t stream = nullptr) {
typedef detail::TensorUpdateOffDiagonalFunc<Element, Layout> Func;
typedef typename Func::Params Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, other),
/*grid_size*/0, /*block_size*/0,
stream
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Fills a tensor with a linear combination of its coordinates
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillLinearFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Array<Element, Layout::kRank> v;
Element s;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
    /// Construction of the linear-fill functor.
Params(
TensorView view_, ///< destination tensor
Array<Element, Layout::kRank> const & v_,
Element s_ = Element(0)
):
view(view_), v(v_), s(s_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
  /// Device-side construction of the functor
CUTLASS_DEVICE
TensorFillLinearFunc(Params const ¶ms): params(params) {
}
  /// Computes the linear combination and writes it to the tensor
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
Element sum = params.s;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank; ++i) {
if constexpr (is_complex<Element>::value) {
if constexpr (sizeof_bits<Element>::value <= 32) {
sum = Element(static_cast<complex<float>>(sum) +
static_cast<complex<float>>(params.v[i]) * static_cast<complex<float>>(coord[i]));
}
}
else if constexpr (sizeof_bits<Element>::value <= 32) {
if constexpr (std::numeric_limits<Element>::is_integer) {
sum = Element(static_cast<int32_t>(sum) +
static_cast<int32_t>(params.v[i]) * static_cast<int32_t>(coord[i]));
}
else {
sum = Element(static_cast<float>(sum) +
static_cast<float>(params.v[i]) * static_cast<float>(coord[i]));
}
}
else {
sum += params.v[i] * coord[i];
}
}
params.view.at(coord) = sum;
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with a linear combination of its coordinates and a vector of coefficients
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillLinear(
TensorView<Element, Layout> view, ///< destination tensor
Array<Element, Layout::kRank> const & v,
Element s = Element(0),
cudaStream_t stream = nullptr) {
using Func = detail::TensorFillLinearFunc<Element, Layout>;
using Params = typename Func::Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, v, s),
/*grid_size*/0, /*block_size*/0,
stream
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values from a distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandom(
TensorView<Element, Layout> view, ///< destination tensor
uint64_t seed,
Distribution dist,
cudaStream_t stream = nullptr) {
using Real = typename RealType<Element>::Type;
if (dist.kind == Distribution::Gaussian) {
TensorFillRandomGaussian<Element, Layout>(
view,
seed,
static_cast<Real>(dist.gaussian.mean),
static_cast<Real>(dist.gaussian.stddev),
dist.int_scale,
stream);
} else if (dist.kind == Distribution::Uniform) {
TensorFillRandomUniform<Element, Layout>(
view,
seed,
static_cast<Real>(dist.uniform.max),
static_cast<Real>(dist.uniform.min),
dist.int_scale,
stream);
}
}
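/// Illustrative usage sketch (cutlass::Distribution is declared in "cutlass/util/distribution.h";
/// 'tensor' is assumed to be a HostTensor):
///
/// \code
///   cutlass::Distribution dist;
///   dist.set_uniform(-4, 4);     // draw values uniformly from [-4, 4)
///   cutlass::reference::device::TensorFillRandom(tensor.device_view(), /*seed=*/7, dist);
/// \endcode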
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of data with sequential elements
template <
typename Element
>
void BlockFillSequential(
Element *ptr,
int64_t capacity,
Element v = Element(1),
Element s = Element(0)) {
using Layout = layout::PackedVectorLayout;
Layout::TensorCoord size(static_cast<Layout::Index>(capacity)); // -Wconversion
Layout layout = Layout::packed(size);
TensorView<Element, Layout> view(ptr, layout, size);
Array<Element, Layout::kRank> c{};
c[0] = v;
TensorFillLinear(view, c, s);
}
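/// Illustrative usage sketch: writes the arithmetic sequence s, s+v, s+2*v, ...
/// (assumes cutlass::DeviceAllocation from "cutlass/util/device_memory.h"):
///
/// \code
///   cutlass::DeviceAllocation<int> block(16);
///   cutlass::reference::device::BlockFillSequential(block.get(), 16);   // fills 0, 1, 2, ..., 15
/// \endcode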
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of data with random values from a distribution
template <
typename Element
>
void BlockFillRandom(
Element *ptr,
size_t capacity,
uint64_t seed,
Distribution dist,
cudaStream_t stream = nullptr) {
using Real = typename RealType<Element>::Type;
if (dist.kind == Distribution::Gaussian) {
BlockFillRandomGaussian<Element>(
ptr,
capacity,
seed,
static_cast<Real>(dist.gaussian.mean),
static_cast<Real>(dist.gaussian.stddev),
dist.int_scale,
stream);
}
else if (dist.kind == Distribution::Uniform) {
BlockFillRandomUniform<Element>(
ptr,
capacity,
seed,
static_cast<Real>(dist.uniform.max),
static_cast<Real>(dist.uniform.min),
dist.int_scale,
stream);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Copies a dense vector into the diagonal of a tensor
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorCopyDiagonalInFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element const *ptr;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
    /// Construction of the diagonal copy-in functor.
Params(
TensorView view_, ///< destination tensor
Element const *ptr_
):
view(view_), ptr(ptr_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
  /// Device-side construction of the functor
CUTLASS_DEVICE
TensorCopyDiagonalInFunc(Params const ¶ms): params(params) {
}
/// Only update the diagonal element
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool is_diagonal = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[0]) {
is_diagonal = false;
}
}
if (is_diagonal) {
params.view.at(coord) = params.ptr[coord[0]];
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies a diagonal in from a device-accessible dense buffer without modifying off-diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorCopyDiagonalIn(
TensorView<Element, Layout> view, ///< destination tensor
Element const *ptr, ///< dense buffer of elements
cudaStream_t stream = nullptr) {
using Func = detail::TensorCopyDiagonalInFunc<Element, Layout>;
using Params = typename Func::Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, ptr),
/*grid_size*/0, /*block_size*/0,
stream
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Copies the diagonal of a tensor out into a dense vector
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorCopyDiagonalOutFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element *ptr;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
    /// Construction of the diagonal copy-out functor.
Params(
TensorView view_, ///< destination tensor
Element *ptr_
):
view(view_), ptr(ptr_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
  /// Device-side construction of the functor
CUTLASS_DEVICE
TensorCopyDiagonalOutFunc(Params const ¶ms): params(params) {
}
  /// Writes the diagonal element to the output buffer
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool is_diagonal = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[0]) {
is_diagonal = false;
}
}
if (is_diagonal) {
params.ptr[coord[0]] = params.view.at(coord);
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies the diagonal of a tensor into a device-accessible dense buffer.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorCopyDiagonalOut(
Element *ptr, ///< dense buffer of elements
TensorView<Element, Layout> view, ///< source tensor
cudaStream_t stream = nullptr) {
using Func = detail::TensorCopyDiagonalOutFunc<Element, Layout>;
using Params = typename Func::Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, ptr),
/*grid_size*/0, /*block_size*/0,
stream
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/device/tensor_fill.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/tensor_fill.h",
"repo_id": "tools",
"token_count": 17955
} | 59 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for complex-valued SYMM update in host-side code.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include <assert.h>
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a symmetric (SYMM) or Hermitian (HEMM) matrix product D = alpha * A * B + beta * C
/// (or D = alpha * B * A + beta * C for SideMode::kRight), where A is a symmetric or Hermitian
/// matrix stored in the triangle selected by FillModeA and all tensors are rank-2 TensorRef objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
SideMode SideModeA,
FillMode FillModeA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
BlasMode BlasMode_ = BlasMode::kSymmetric,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_symm_complex(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum,
int batch_count = 1,
int64_t batch_stride_A = 0,
int64_t batch_stride_B = 0,
int64_t batch_stride_C = 0,
int64_t batch_stride_D = 0) {
static SideMode const kSideModeA = SideModeA;
static FillMode const kFillModeA = FillModeA;
static BlasMode const kBlasMode = BlasMode_;
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
static_assert(kSideModeA != SideMode::kInvalid
, "Side Mode can either be Left or Right.");
static_assert(
kFillModeA == FillMode::kLower ||
kFillModeA == FillMode::kUpper,
"Fill Mode can either be Lower or Upper.");
using CompareOp_w_diag = typename TrMatrixCompareOp<kFillModeA, DiagType::kNonUnit>::Type;
using CompareOp_wo_diag = typename TrMatrixCompareOp<kFillModeA, DiagType::kZero>::Type;
  // Batched mode: the loop below applies batch_count and the batch strides.
int const M = problem_size.m();
int const N = problem_size.n();
// Assuming correct k-dimension value is passed
int const K = problem_size.k();
// Blocking necessary to speedup reference implementation
int const Mblock = 16;
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
CompareOp_w_diag compare_op_1;
CompareOp_wo_diag compare_op_2;
for (int batch_idx = 0; batch_idx < batch_count; ++batch_idx) {
// Compute matrix product using blocks
for (int row_block = 0; row_block < M; row_block += Mblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Mblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N)
{
ElementA a_1 = ElementA();
ElementB b_1 = ElementB();
ElementA a_2 = ElementA();
ElementB b_2 = ElementB();
// A x B or B x A (with diagonal)
if (kSideModeA == SideMode::kLeft) {
a_1 = (compare_op_1(row, k_block)) ?
(tensor_a.at(MatrixCoord(row, k_block))) : ElementA();
b_1 = tensor_b.at(MatrixCoord(k_block, col));
} else if (kSideModeA == SideMode::kRight) {
a_1 = tensor_b.at(MatrixCoord(row, k_block));
b_1 = (compare_op_1(k_block, col)) ?
tensor_a.at(MatrixCoord(k_block, col)) : ElementA();
}
ComputeType compute_a_1 = ComputeType(a_1);
ComputeType compute_b_1 = ComputeType(b_1);
              // For Hermitian mode, the imaginary parts of the diagonal elements
              // are assumed to be zero and are discarded
if (kBlasMode == BlasMode::kHermitian && kSideModeA == SideMode::kLeft && row == k_block) {
compute_a_1 = real(compute_a_1);
} else if (kBlasMode == BlasMode::kHermitian && kSideModeA == SideMode::kRight && k_block == col) {
compute_b_1 = real(compute_b_1);
}
accum[i][j] = inner_product_op(compute_a_1, compute_b_1, accum[i][j]);
// A^T x B or B x A^T (without diagonal)
if (kSideModeA == SideMode::kLeft) {
a_2 = (compare_op_2(k_block, row)) ?
(tensor_a.at(MatrixCoord(k_block, row))) : ElementA();
b_2 = tensor_b.at(MatrixCoord(k_block, col));
if (kBlasMode == BlasMode::kHermitian)
a_2 = conj(a_2);
} else if (kSideModeA == SideMode::kRight) {
a_2 = tensor_b.at(MatrixCoord(row, k_block));
b_2 = (compare_op_2(col, k_block)) ?
tensor_a.at(MatrixCoord(col, k_block)) : ElementA();
if (kBlasMode == BlasMode::kHermitian)
b_2 = conj(b_2);
}
ComputeType compute_a_2 = ComputeType(a_2);
ComputeType compute_b_2 = ComputeType(b_2);
accum[i][j] = inner_product_op(compute_a_2, compute_b_2, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
ScalarType c = tensor_c.at(coord);
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]) +
beta * c);
}
}
}
} // for (col_block)
} // for (row_block)
tensor_a.add_pointer_offset(batch_stride_A);
tensor_b.add_pointer_offset(batch_stride_B);
tensor_c.add_pointer_offset(batch_stride_C);
tensor_d.add_pointer_offset(batch_stride_D);
} // for (batch_idx)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
SideMode SideModeA,
FillMode FillModeA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
BlasMode BlasMode_ = cutlass::BlasMode::kSymmetric,
typename InnerProductOp = cutlass::arch::OpMultiplyAddComplex
>
struct SymmComplex;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA,
SideMode SideModeA, FillMode FillModeA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType,
BlasMode BlasMode_>
struct SymmComplex<ElementA, LayoutA,
SideModeA, FillModeA,
ElementB, LayoutB,
ElementC, LayoutC, ScalarType,
ComputeType, BlasMode_,
arch::OpMultiplyAddComplex> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_symm_complex<ElementA, LayoutA,
SideModeA, FillModeA,
ElementB, LayoutB,
ElementC, LayoutC,
ScalarType, ComputeType, BlasMode_, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for gaussian multiply-add
template <typename ElementA, typename LayoutA,
SideMode SideModeA, FillMode FillModeA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType,
BlasMode BlasMode_>
struct SymmComplex<ElementA, LayoutA,
SideModeA, FillModeA,
ElementB, LayoutB,
ElementC, LayoutC, ScalarType,
ComputeType, BlasMode_,
arch::OpMultiplyAddGaussianComplex> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_symm_complex<ElementA, LayoutA,
SideModeA, FillModeA,
ElementB, LayoutB,
ElementC, LayoutC,
ScalarType, ComputeType, BlasMode_, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/host/symm_complex.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/symm_complex.h",
"repo_id": "tools",
"token_count": 5243
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS layout visualization example
*/
#pragma once
#include <algorithm>
#include <stdexcept>
#include <vector>
#include "cutlass/coord.h"
#include "cutlass/util/reference/host/tensor_foreach.h"
#include "register_layout.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord, int Rank>
struct vector_to_coord {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[Rank - 1] = vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 1> {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[0] = vec.at(0);
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 0> {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
}
};
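
// Illustrative usage sketch (not part of the original example): vector_to_coord
// unrolls at compile time, copying vec[0..Rank) into a static-rank coordinate.
// A rank of 4 is chosen here only for illustration.
inline cutlass::Coord<4> example_vector_to_coord(std::vector<int> const &vec) {
  cutlass::Coord<4> coord;
  vector_to_coord<cutlass::Coord<4>, 4>(coord, vec);  // coord[i] = vec.at(i) for i in [0, 4)
  return coord;
}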
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
std::ostream &operator<<(std::ostream &out, std::vector<T> const &vec) {
auto it = vec.begin();
if (it != vec.end()) {
out << *it;
for (++it; it != vec.end(); ++it) {
out << ", " << *it;
}
}
return out;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permits copying static-length vectors into dynamic vectors
template <typename TensorCoord, int Rank>
struct coord_to_vector {
coord_to_vector(std::vector<int> &vec, TensorCoord const &coord) {
vec.at(Rank - 1) = coord[Rank - 1];
coord_to_vector<TensorCoord, Rank - 1>(vec, coord);
}
};
/// Permits copying static-length vectors into dynamic vectors
template <typename TensorCoord>
struct coord_to_vector<TensorCoord, 1> {
coord_to_vector(std::vector<int> &vec, TensorCoord const &coord) {
vec.at(0) = coord[0];
}
};
/// Permits copying static-length vectors into dynamic vectors
template <typename TensorCoord>
struct coord_to_vector<TensorCoord, 0> {
coord_to_vector(std::vector<int> &vec, TensorCoord const &coord) {
}
};
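
// Illustrative usage sketch (not part of the original example): coord_to_vector is the
// inverse of vector_to_coord above; the destination vector must already be sized to the
// coordinate's rank.
inline std::vector<int> example_coord_to_vector(cutlass::Coord<4> const &coord) {
  std::vector<int> vec(4, 0);
  coord_to_vector<cutlass::Coord<4>, 4>(vec, coord);  // vec.at(i) = coord[i] for i in [0, 4)
  return vec;
}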
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure representing an element in source memory
struct Element {
std::vector<int> coord; ///< logical coordinate of element (as vector)
int offset; ///< linear offset from source memory
int color; ///< enables coloring each element to indicate
/// Default ctor
inline Element(): offset(-1), color(0) { }
/// Construct from logical coordinate and initial offset
inline Element(
std::vector<int> const &coord_,
int offset_,
int color_ = 0
):
coord(coord_), offset(offset_), color(color_) { }
/// Returns true if element is in a defined state
inline bool valid() const {
return offset >= 0;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Visualizes memory layouts by constructing a 'shape'
template <typename Layout_>
class VisualizeLayout : public VisualizeLayoutBase {
public:
using Layout = Layout_;
using TensorCoord = typename Layout::TensorCoord;
using Stride = typename Layout::Stride;
public:
Options options;
Layout layout;
TensorCoord extent;
std::vector<Element> elements;
public:
/// Initializes the problem space
VisualizeLayout() {
}
/// visualization method
bool visualize(Options const &options_) {
options = options_;
if (options.extent.size() != TensorCoord::kRank) {
std::cerr
<< "--extent must have rank " << TensorCoord::kRank
<< " (given: " << options.extent.size() << ")" << std::endl;
return false;
}
vector_to_coord<TensorCoord, TensorCoord::kRank>(extent, options.extent);
// Construct the layout for a packed tensor
if (options.stride.empty()) {
layout = Layout::packed(extent);
}
else if (options.stride.size() != Stride::kRank) {
std::cerr
<< "--stride must have rank " << Stride::kRank
<< " (given: " << options.stride.size() << ")" << std::endl;
return false;
}
else {
      // Stride specified on the command line
Stride stride;
vector_to_coord<Stride, Stride::kRank>(stride, options.stride);
layout = Layout(stride);
}
// Resize elements, setting elements to 'undefined' state
elements.resize(layout.capacity(extent));
// enumerate points in tensor space and assign
cutlass::reference::host::TensorForEachLambda(
extent,
[&](TensorCoord coord) {
std::vector<int> coord_vec(TensorCoord::kRank, 0);
coord_to_vector<TensorCoord, TensorCoord::kRank>(coord_vec, coord);
int offset = int(layout(coord));
if (offset >= int(elements.size())) {
std::cerr
<< "Layout error - " << coord_vec
<< " is out of range (computed offset: " << offset
<< ", capacity: " << elements.size() << std::endl;
throw std::out_of_range("(TensorForEach) layout error - coordinate out of range");
}
elements.at(offset) = Element(coord_vec, offset);
});
return true;
}
/// Verifies the layout satisfies vectorization requirements
bool verify(bool verbose, std::ostream &out) {
return true;
}
private:
/// returns a pair (is_vectorizable, one_changing_rank) to determine if a
/// vector exists (consecutive logical coordinates or uniformly invalid)
/// at the given location.
std::pair< bool, int > _is_vectorizable(int i) const {
// (all elements are invalid) or
// (all elements are valid AND
// exactly one rank is changing AND
// elements are consecutive)
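    // Example: with options.vectorize == 4, elements at offsets i..i+3 whose coordinates are
    // (r, c), (r, c+1), (r, c+2), (r, c+3) form a vector with one_changing_rank == 1; if more
    // than one rank differed across the group, it would not be vectorizable.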
// Don't need vectorization.
if (options.vectorize <= 2) return std::make_pair(false, -1);
// Boundary check.
    if (i >= int(elements.size()) || (i + options.vectorize) > int(elements.size()))
return std::make_pair(false, -1);
// Check if either all elements are valid or invalid.
bool all_elements_invalid = std::all_of(
elements.begin() + i, elements.begin() + i + options.vectorize,
[](Element const &e) { return !e.valid(); });
bool all_elements_valid = std::all_of(
elements.begin() + i, elements.begin() + i + options.vectorize,
[](Element const &e) { return e.valid(); });
if (!all_elements_invalid && !all_elements_valid)
return std::make_pair(false, -1);
// From here, it is vectorizable.
if (all_elements_invalid) return std::make_pair(true, -1);
// Check if only exactly one rank is changing.
int one_changing_rank = -1;
for (int j = 0; j < options.vectorize; ++j) {
for (int r = 0; r < TensorCoord::kRank; ++r) {
if (elements.at(i + j).coord.at(r) != elements.at(i).coord.at(r)) {
if (one_changing_rank == -1) {
one_changing_rank = r;
} else if (one_changing_rank != r) {
return std::make_pair(false, -1);
}
}
}
}
return std::make_pair(true, one_changing_rank);
}
/// Prints a vector of elements
void _print_vector(std::ostream &out, int i, int one_changing_rank) {
Element const &base_element = elements.at(i);
if (base_element.valid()) {
out << "(";
for (int r = 0; r < TensorCoord::kRank; ++r) {
if (r) {
out << ", ";
}
if (r == one_changing_rank) {
out
<< base_element.coord.at(r)
<< ".."
<< (base_element.coord.at(r) + options.vectorize - 1);
}
else {
out << base_element.coord.at(r);
}
}
out << ")";
}
else {
out << " ";
}
}
/// Prints a single element
void _print_element(std::ostream &out, int k) {
Element const &element = elements.at(k);
if (element.valid()) {
out << "(";
for (int v = 0; v < TensorCoord::kRank; ++v) {
out << (v ? ", " : "") << element.coord.at(v);
}
out << ")";
}
else {
out << " ";
}
}
public:
/// Pretty-prints the layout to the console
void print_csv(std::ostream &out, char delim = '|', char new_line = '\n') {
int row = -1;
for (int i = 0; i < int(elements.size()); i += options.vectorize) {
if (i % options.output_shape.at(0)) {
out << delim;
}
else {
if (row >= 0) {
out << new_line;
}
++row;
if (row == options.output_shape.at(1)) {
out << new_line;
row = 0;
}
}
auto is_vector = _is_vectorizable(i);
if (is_vector.first) {
_print_vector(out, i, is_vector.second); // print a vector starting at element i
}
else {
        for (int j = 0; j < options.vectorize; ++j) { // print individual elements [i..i+options.vectorize)
_print_element(out, i + j);
}
}
}
out << new_line << std::flush;
}
/// Help message
virtual std::ostream &print_help(std::ostream &out) {
out << "TensorCoord rank " << TensorCoord::kRank << ", Stride rank: " << Stride::kRank;
return out;
}
};
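
/////////////////////////////////////////////////////////////////////////////////////////////////

// Illustrative sketch (not part of the original example): the mapping enumerated by
// VisualizeLayout is Layout::operator(), which takes a logical coordinate to a linear
// offset. A packed column-major matrix is the simplest case; the include below is added
// only for this illustration.
#include "cutlass/layout/matrix.h"

inline int example_column_major_offset() {
  // A packed 4x8 column-major matrix has stride(0) == 4 (one column's worth of elements).
  cutlass::layout::ColumnMajor layout = cutlass::layout::ColumnMajor::packed({4, 8});
  // Element (row=2, column=3) therefore maps to offset 3 * 4 + 2 == 14.
  return int(layout(cutlass::MatrixCoord(2, 3)));
}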
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/03_visualize_layout/visualize_layout.h/0 | {
"file_path": "examples/03_visualize_layout/visualize_layout.h",
"repo_id": "examples",
"token_count": 4166
} | 0 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Planar Complex Array Example
This example demonstrates the CUTLASS Library's exposure of planar complex GEMM kernels which
execute a batch of matrix products, loading problem sizes and matrix base pointers from arrays
in global memory.
These kernels represent complex matrices by storing the real and imaginary parts of the matrix in
disjoint regions in memory. These real-valued matrices are stored using existing cuBLAS layouts
as either column-major or row-major layouts with a single leading dimension indicating the stride
between columns or rows.
The CUTLASS Library collects multiple template instantiations in a data structure and offers
a BLAS-like dispatch API to invoke the appropriate kernel on the Volta or Turing architectures.
CUTLASS decouples matrix layout from complex transformation, so four possible transformations
are possible on the A and B operands:
n: column-major
c: column-major complex conjugate
t: row-major
h: row-major complex conjugate
To build strictly the planar complex kernels needed for general application, execute the following
CMake command in an empty build directory.
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" \
-DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_*gemm_planar_complex
This builds all planar complex GEMM variants for Volta and Turing architectures.
To build strictly the kernels needed for this example, an even narrower filter string may be
specified as follows. This only builds planar complex GEMMs targeting Tensor Cores for
the 'CN' layout configuration (conjugate A operand with both A and B as column-major).
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" \
-DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_f16_s*gemm_planar_complex_array_f16*cn
$ make 11_planar_complex_array
$ ./examples/11_planar_complex_array/11_planar_complex_array --m=2048 --n=1024 --k=512 --batch=10
*/
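// Illustrative sketch (not part of the original example) of the planar complex storage
// convention described above. The names are assumptions for illustration: 'ptr' is the
// base of the real-valued plane, 'ld' the leading dimension shared by both planes, and
// 'imaginary_stride' the element offset separating the real and imaginary planes.
template <typename Element>
Element const *planar_complex_imag_ptr(
  Element const *ptr, int row, int column, int ld, long long imaginary_stride) {
  // Column-major addressing: real(A(row, column)) lives at ptr[row + column * ld];
  // the matching imaginary part follows at an additional offset of imaginary_stride.
  return ptr + (row + column * static_cast<long long>(ld)) + imaginary_stride;
}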
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor_planar_complex.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/device/gemm_planar_complex.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/library/handle.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
cutlass::complex<float> alpha;
cutlass::complex<float> beta;
bool reference_check;
int iterations;
Options():
help(false),
problem_size({1024, 1024, 1024}),
batch_count(1),
reference_check(true),
iterations(20),
alpha(1),
beta() { }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("batch", batch_count);
cmd.get_cmd_line_argument("alpha", alpha.real());
cmd.get_cmd_line_argument("alpha_i", alpha.imag());
cmd.get_cmd_line_argument("beta", beta.real());
cmd.get_cmd_line_argument("beta_i", beta.imag());
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "11_planar_complex_array example\n\n"
<< " This example uses the CUTLASS Library to execute Planar Complex Array GEMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --batch=<int> Number of GEMM operations executed in one batch\n"
<< " --alpha=<f32> Epilogue scalar alpha (real part)\n"
<< " --alpha_i=<f32> Epilogue scalar alpha (imaginary part)\n"
<< " --beta=<f32> Epilogue scalar beta (real part)\n\n"
<< " --beta_i=<f32> Epilogue scalar beta (imaginary part)\n\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/11_planar_complex_array/11_planar_complex_array\n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product() * batch_count * 4;
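    // Example: m = n = k = 1024 with batch_count = 1 gives 4 * 2^30 real-valued FMAs
    // (a complex multiply-add costs four real multiply-adds), i.e. roughly 8.6 GFLOP.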
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Performance test environment for planar complex
class TestbedPlanarComplex {
public:
// Half-precision input and output
using Element = cutlass::half_t;
// Configurations for layouts and internal computation
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementCompute = float;
using ElementAccumulator = float;
//
// Data members
//
cutlass::library::Handle handle;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
cutlass::DeviceAllocation<Element> tensor_A;
cutlass::DeviceAllocation<Element> tensor_B;
cutlass::DeviceAllocation<Element> tensor_C;
cutlass::DeviceAllocation<Element> tensor_D;
cutlass::DeviceAllocation<Element> tensor_D_ref;
cutlass::DeviceAllocation<void *> ptr_A_real;
cutlass::DeviceAllocation<void *> ptr_A_imag;
cutlass::DeviceAllocation<void *> ptr_B_real;
cutlass::DeviceAllocation<void *> ptr_B_imag;
cutlass::DeviceAllocation<void *> ptr_C_real;
cutlass::DeviceAllocation<void *> ptr_C_imag;
cutlass::DeviceAllocation<void *> ptr_D_real;
cutlass::DeviceAllocation<void *> ptr_D_imag;
//
// Methods
//
TestbedPlanarComplex(
Options const &options
):
problem_size(options.problem_size), batch_count(options.batch_count) {
// Allocate device memory for batched planar complex GEMM
tensor_A.reset(int64_t(problem_size.m()) * problem_size.k() * batch_count * 2);
tensor_B.reset(int64_t(problem_size.k()) * problem_size.n() * batch_count * 2);
tensor_C.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
tensor_D.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
tensor_D_ref.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
ptr_A_real.reset(batch_count);
ptr_A_imag.reset(batch_count);
ptr_B_real.reset(batch_count);
ptr_B_imag.reset(batch_count);
ptr_C_real.reset(batch_count);
ptr_C_imag.reset(batch_count);
ptr_D_real.reset(batch_count);
ptr_D_imag.reset(batch_count);
}
void initialize() {
uint64_t seed = 1073;
// Use small integers to simplify correctness checking
int scope_max = 6;
int scope_min = -6;
cutlass::reference::device::BlockFillRandomUniform(
tensor_A.get(), tensor_A.size(), seed, Element(scope_max), Element(scope_min), 0);
cutlass::reference::device::BlockFillRandomUniform(
tensor_B.get(), tensor_B.size(), seed * 2019, Element(scope_max), Element(scope_min), 0);
cutlass::reference::device::BlockFillRandomUniform(
tensor_C.get(), tensor_C.size(), seed * 2020, Element(scope_max), Element(scope_min), 0);
}
Result profile(Options const &options) {
Result result;
initialize();
Element *ptr_A = tensor_A.get();
Element *ptr_B = tensor_B.get();
Element *ptr_C = tensor_C.get();
Element *ptr_D = tensor_D.get();
int64_t batch_stride_A = int64_t(problem_size.m()) * problem_size.k() * 2;
int64_t batch_stride_B = int64_t(problem_size.k()) * problem_size.n() * 2;
int64_t batch_stride_C = int64_t(problem_size.m()) * problem_size.n() * 2;
int64_t batch_stride_D = int64_t(problem_size.m()) * problem_size.n() * 2;
typename LayoutA::Stride::Index lda = LayoutA::packed({problem_size.m(), problem_size.k()}).stride(0);
typename LayoutB::Stride::Index ldb = LayoutB::packed({problem_size.k(), problem_size.n()}).stride(0);
typename LayoutC::Stride::Index ldc = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0);
typename LayoutC::Stride::Index ldd = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0);
int64_t imag_stride_A = int64_t(problem_size.m()) * problem_size.k();
int64_t imag_stride_B = int64_t(problem_size.k()) * problem_size.n();
int64_t imag_stride_C = int64_t(problem_size.m()) * problem_size.n();
int64_t imag_stride_D = int64_t(problem_size.m()) * problem_size.n();
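    // Each tensor was allocated with batch_count * 2 * (rows * columns) elements: within a
    // batch, the real-valued plane comes first and the imaginary plane follows imag_stride
    // elements later, which is why the batch strides above carry the factor of 2.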
//
// Configure pointers in global memory
//
struct {
Element *base;
void **ptr_real;
void **ptr_imag;
int64_t batch_stride;
int64_t imag_stride;
} tensors[] = {
{ tensor_A.get(), ptr_A_real.get(), ptr_A_imag.get(), batch_stride_A, imag_stride_A},
{ tensor_B.get(), ptr_B_real.get(), ptr_B_imag.get(), batch_stride_B, imag_stride_B},
{ tensor_C.get(), ptr_C_real.get(), ptr_C_imag.get(), batch_stride_C, imag_stride_C},
{ tensor_D.get(), ptr_D_real.get(), ptr_D_imag.get(), batch_stride_D, imag_stride_D}
};
for (auto const &tensor : tensors) {
for (int idx = 0; idx < batch_count; ++idx) {
void *ptr_real = tensor.base + idx * tensor.batch_stride;
void *ptr_imag = tensor.base + idx * tensor.batch_stride + tensor.imag_stride;
cudaError_t error = cudaMemcpy(
tensor.ptr_real + idx,
&ptr_real,
sizeof(void *),
cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to copy pointer to device memory");
}
error = cudaMemcpy(
tensor.ptr_imag + idx,
&ptr_imag,
sizeof(void *),
cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to copy pointer to device memory");
}
}
}
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        return result;
}
}
// Record an event at the start of a series of GEMM operations
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
//
// Execute the planar complex array GEMM kernel via the CUTLASS Library's
// dispatch routines.
//
// Note, for planar complex array GEMM kernels, all numeric type arguments
// specify the data type of the base real types. These are understood to
// apply to planar complex representations of matrices in memory and to complex<T>
// structures for scalars.
//
// See tools/library/include/cutlass/library/handle.h for more details.
//
result.status = handle.gemm_planar_complex_array(
problem_size.m(), // expected GEMM M dimension
problem_size.n(), // expected GEMM N dimension
problem_size.k(), // expected GEMM K dimension
batch_count, // Number of batched elements
nullptr,
nullptr,
nullptr,
cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued accumulation
cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued alpha/beta scalars
&options.alpha, // Pointer to alpha scalar, of type complex<T>
cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued A matrix
cutlass::library::LayoutTypeID::kColumnMajor, // Layout of A matrix
cutlass::library::ComplexTransform::kConjugate, // Complex transformation on A matrix operand
ptr_A_real.get(), // Pointer to array of pointers to real part of A matrix
ptr_A_imag.get(), // Pointer to array of pointers to imaginary part of A matrix
lda, // Leading dimension of real part of A matrix
lda, // Leading dimension of imaginary part of A matrix
cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued B matrix
cutlass::library::LayoutTypeID::kColumnMajor, // Layout of B matrix
cutlass::library::ComplexTransform::kNone, // Complex transformation on B matrix operand
ptr_B_real.get(), // Pointer to array of pointers to real part of B matrix
ptr_B_imag.get(), // Pointer to array of pointers to imaginary part of B matrix
ldb, // Leading dimension of real part of B matrix
ldb, // Leading dimension of imaginary part of B matrix
&options.beta, // Pointer to beta scalar, of type complex<T>
cutlass::library::NumericTypeID::kF16, // Base data type of complex valued C and D matrices
ptr_C_real.get(), // Pointer to array of pointers to real part of C matrix
ptr_C_imag.get(), // Pointer to array of pointers to imaginary part of C matrix
ldc, // Leading dimension of real part of C matrix
ldc, // Leading dimension of imaginary part of C matrix
ptr_D_real.get(), // Pointer to array of pointers to real part of D matrix
ptr_D_imag.get(), // Pointer to array of pointers to imaginary part of D matrix
ldd, // Leading dimension of real part of D matrix
ldd // Leading dimension of imaginary part of D matrix
);
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS internal error - configuration not supported" << std::endl;
return result;
}
}
//
// Stop profiling loop
//
// Record an event when the GEMM operations have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
if (handle.get_last_operation()) {
std::cout << "Recently executed '" << handle.get_last_operation()->description().name << "'" << std::endl;
}
//
// Compute reference in device code
//
if (options.reference_check) {
result.passed = true;
for (int64_t idx = 0; result.passed && idx < int64_t(batch_count); ++idx) {
cutlass::reference::device::GemmPlanarComplex<
Element, LayoutA,
Element, LayoutB,
Element, LayoutC,
ElementAccumulator
>(
problem_size,
options.alpha,
{tensor_A.get() + idx * batch_stride_A, lda, imag_stride_A},
cutlass::ComplexTransform::kConjugate,
{tensor_B.get() + idx * batch_stride_B, ldb, imag_stride_B},
cutlass::ComplexTransform::kNone,
options.beta,
{tensor_C.get() + idx * batch_stride_C, ldc, imag_stride_C},
{tensor_D_ref.get() + idx * batch_stride_D, ldd, imag_stride_D}
);
Element epsilon = 0.1_hf;
Element nonzero_floor = 0.1_hf;
result.passed = cutlass::reference::device::BlockCompareRelativelyEqual(
tensor_D.get() + idx * batch_stride_D,
tensor_D_ref.get() + idx * batch_stride_D,
batch_stride_D,
epsilon,
nonzero_floor
);
}
if (result.passed) {
std::cout << "Reference check passed." << std::endl;
}
else {
std::cerr << "Error - reference check failed." << std::endl;
}
}
std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " GFLOPs: " << result.gflops << std::endl;
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
//
// This example uses mma.sync to directly access Tensor Cores to achieve peak performance.
//
// Volta Tensor Core operations are first available in CUDA 10.1 Toolkit.
//
// Turing Tensor Core operations are first available in CUDA 10.2 Toolkit.
//
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (props.major < 7) {
std::cerr << "Tensor Core operations must be run on a machine with compute capability at least 70."
<< std::endl;
    // Returning zero so this passes on older architectures. Its actions are a no-op.
return 0;
}
else if (props.major == 7 && props.minor <= 2) {
//
// If running on the Volta architecture, at least CUDA 10.1 Toolkit is required to run this example.
//
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Volta Tensor Core operations must be compiled with CUDA 10.1 Toolkit or later." << std::endl;
      // Returning zero so this passes on older Toolkits. Its actions are a no-op.
return 0;
}
}
else if (props.major == 7 && props.minor >= 5) {
//
// If running on the Turing architecture, at least CUDA 10.2 Toolkit is required to run this example.
//
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
      // Returning zero so this passes on older Toolkits. Its actions are a no-op.
return 0;
}
}
else {
// NVIDIA Ampere Architecture GPUs (SM80 and later) are fully supported on CUDA 11 Toolkit and beyond.
//
// fall through
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
TestbedPlanarComplex testbed(options);
Result result = testbed.profile(options);
return result.passed ? 0 : -1;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/11_planar_complex_array/planar_complex_array.cu/0 | {
"file_path": "examples/11_planar_complex_array/planar_complex_array.cu",
"repo_id": "examples",
"token_count": 9173
} | 1 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/transform/threadblock/predicated_vector_access_iterator.h"
#include "cutlass/transform/threadblock/vector_iterator.h"
#include "cutlass/transform/warp/vector_fragment_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "kernel/b2b_implicit_gemm_convolution.h"
#include "threadblock/b2b_implicit_gemm_pipelined.h"
#include "threadblock/b2b_implicit_gemm_multistage.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape0,
typename ThreadblockShape1,
typename WarpShape0,
typename WarpShape1,
typename InstructionShape,
typename EpilogueOutputOp0,
typename EpilogueOutputOp1,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
bool SmemAccumulator = false
> struct DefaultB2bConv2dFprop;
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop.h",
"repo_id": "examples",
"token_count": 1110
} | 2 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "threadblock/b2b_mma_base.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape0_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA0_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA0_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA0,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB0_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB0_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB0,
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape1_,
/// Iterates over the intermediate accumulator tile
// (concept::MmaTensorOpFragmentIterator)
typename FragmentIteratorA1_,
/// Iterates over vectors of scale and bias vector in global memory
// (concept: VectorIterator)
typename IteratorAccumulatorScaleBias_,
/// WarpIterator to load Scale or Bias vector from threadblock fragment
typename FragmentIteratorA1ScaleBias_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB1_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB1_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB1,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Output operator for 1st Gemm(concept: epilogue::thread::LinearCombinationClamp, etc...)
typename OutputOp_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy0_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy1_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class B2bMmaMultistage :
public B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages> {
public:
///< Base class
using Base = B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape0 = Shape0_;
///< Iterates over tiles of A operand in global memory
using IteratorA0 = IteratorA0_;
using IteratorA = IteratorA0;
///< Iterates over tiles of B operand in global memory
using IteratorB0 = IteratorB0_;
using IteratorB = IteratorB0;
///< Policy describing tuning details
using Policy0 = Policy0_;
using SmemIteratorA0 = SmemIteratorA0_;
using SmemIteratorB0 = SmemIteratorB0_;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape1 = Shape1_;
///< Iterates over intermediate accumulator tile
using FragmentIteratorA1 = FragmentIteratorA1_;
///< Iterates over tiles of the scale and bias vectors in global memory
using IteratorAccumulatorScaleBias = IteratorAccumulatorScaleBias_;
///< WarpIterator to load Scale or Bias vector from threadblock fragment
using FragmentIteratorA1ScaleBias = FragmentIteratorA1ScaleBias_;
///< Iterates over tiles of B operand in global memory
using IteratorB1 = IteratorB1_;
///< Policy describing tuning details
using Policy1 = Policy1_;
///< Export Policy0 as the threadblock-level Mma's policy
using Policy = Policy0;
using Shape = Shape0;
using SmemIteratorB1 = SmemIteratorB1_;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
///< Epilogue after 1st Gemm
using OutputOp = OutputOp_;
static const bool PerChannelScale = (OutputOp::kScale ==
epilogue::thread::ScaleType::OnlyAlphaPerChannelScaling);
static cutlass::arch::CacheOperation::Kind const kCacheOpA0 = CacheOpA0;
static cutlass::arch::CacheOperation::Kind const kCacheOpB0 = CacheOpB0;
static cutlass::arch::CacheOperation::Kind const kCacheOpB1 = CacheOpB1;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC0 = typename Policy0::Operator::FragmentC;
/// Warp-level Mma
using Operator0 = typename Policy0::Operator;
/// Fragment of Scale and Bias loaded from global memory
using FragmentA1ScaleBias = typename IteratorAccumulatorScaleBias::Fragment;
/// Fragment of accumulator tile
using FragmentC1 = typename Policy1::Operator::FragmentC;
/// Warp-level Mma
using Operator1 = typename Policy1::Operator;
/// Minimum architecture is Sm80 to support cp.async
using ArchTag = arch::Sm80;
/// Complex transform on A operand
static ComplexTransform const kTransformA0 = Operator0::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB0 = Operator0::kTransformB;
/// Complex transform on B operand
static ComplexTransform const kTransformB1 = Operator1::kTransformB;
/// Complex transform exports needed by higher-level kernels
static ComplexTransform const kTransformA = kTransformA0;
static ComplexTransform const kTransformB = kTransformB0;
/// Internal structure exposed for introspection.
struct Detail {
static_assert(Base::kWarpGemmIterations0 > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
static_assert(Base::kWarpGemmIterations1 > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
/// Number of cp.async instructions to load one stage of operand A
static int const TBLoadIterationsA0 =
IteratorA0::ThreadMap::Iterations::kCount;
/// Number of cp.async instructions to load one stage of operand B
static int const TBLoadIterationsB0 =
IteratorB0::ThreadMap::Iterations::kCount;
    /// Number of cp.async instructions to load one stage of operand B1
static int const TBLoadIterationsB1 =
IteratorB1::ThreadMap::Iterations::kCount;
/// Number of stages
static int const kStages = Stages;
    /// Number of cp.async instructions to load one group of operand A
static int const kAccessesPerGroupA0 =
(TBLoadIterationsA0 + Base::kWarpGemmIterations0 - 1) / Base::kWarpGemmIterations0;
    /// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB0 =
(TBLoadIterationsB0 + Base::kWarpGemmIterations0 - 1) / Base::kWarpGemmIterations0;
    /// Number of cp.async instructions to load one group of operand B1
static int const kAccessesPerGroupB1 =
(TBLoadIterationsB1 + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1;
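    // Example: with TBLoadIterationsA0 == 8 and kWarpGemmIterations0 == 4, each group issues
    // kAccessesPerGroupA0 == 2 cp.async copies, spreading a stage's global->shared traffic
    // evenly across the warp-level MMA iterations.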
};
private:
using WarpLoadedFragmentA0 = typename Operator0::FragmentA;
using WarpLoadedFragmentB0 = typename Operator0::FragmentB;
  /// Warp Fragment of operand A1 loaded from accumulator tile
using WarpLoadedFragmentA1 = typename FragmentIteratorA1::Fragment;
using WarpLoadedFragmentA1ScaleBias =
typename FragmentIteratorA1ScaleBias::Fragment;
using WarpLoadedFragmentB1 = typename Operator1::FragmentB;
using WarpTransformedFragmentA0 = typename Operator0::TransformedFragmentA;
using WarpTransformedFragmentB0 = typename Operator0::TransformedFragmentB;
using WarpTransformedFragmentA1 = typename Operator1::TransformedFragmentA;
using WarpTransformedFragmentB1 = typename Operator1::TransformedFragmentB;
private:
//
// Data members
//
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA0 smem_iterator_A0_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB0 smem_iterator_B0_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB1 smem_iterator_B1_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
B2bMmaMultistage(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::B2bMmaSharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx,
///< GEMM0 N is used for accumulator extent
int problem_size_0_n
):
Base(shared_storage, thread_idx, warp_idx, lane_idx),
smem_iterator_A0_(shared_storage.shared_storage0.operand_A_ref(), thread_idx),
smem_iterator_B0_(shared_storage.shared_storage0.operand_B_ref(), thread_idx),
smem_iterator_B1_(shared_storage.shared_storage1.operand_B_ref(), thread_idx)
{
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN);
int warp_idx_k = warp_idx / (Base::WarpCount0::kM * Base::WarpCount0::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount0::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount0::kM;
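    // Example: with WarpCount0::kM == 2 and WarpCount0::kN == 2, warps 0..3 map to
    // (m, n) = (0, 0), (1, 0), (0, 1), (1, 1) with warp_idx_k == 0; warps 4..7, if present,
    // repeat the same pattern with warp_idx_k == 1.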
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A0_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations0 * warp_idx_k});
this->warp_tile_iterator_B0_.add_tile_offset(
{Base::kWarpGemmIterations0 * warp_idx_k, warp_idx_n});
this->warp_tile_iterator_B1_.add_tile_offset(
{Base::kWarpGemmIterations1 * warp_idx_k, warp_idx_n});
}
CUTLASS_DEVICE
void copy_tiles_and_advance_0(IteratorA0 &iterator_A0, IteratorB0 &iterator_B0,
int group_start_A0 = 0, int group_start_B0 = 0) {
iterator_A0.set_iteration_index(group_start_A0 *
IteratorA0::kAccessesPerVector);
this->smem_iterator_A0_.set_iteration_index(group_start_A0);
// Load for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupA0; ++j) {
if (group_start_A0 + j < Detail::TBLoadIterationsA0) {
typename IteratorA0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA0::AccessType *>(
this->smem_iterator_A0_.get());
int const kSrcBytes = sizeof_bits<typename IteratorA0::Element>::value *
IteratorA0::ThreadMap::kElementsPerAccess /
IteratorA0::kAccessesPerVector / 8;
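        // Example: for a 16-bit element type with 8 elements per access and 1 access per
        // vector, each cp.async instruction copies 16 * 8 / 1 / 8 = 16 bytes.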
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA0::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_A0.get();
cutlass::arch::cp_async<kSrcBytes, kCacheOpA0>(
dst_ptr + v, gmem_ptr, iterator_A0.valid());
++iterator_A0;
}
++this->smem_iterator_A0_;
}
}
iterator_B0.set_iteration_index(group_start_B0 *
IteratorB0::kAccessesPerVector);
this->smem_iterator_B0_.set_iteration_index(group_start_B0);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB0; ++j) {
if (group_start_B0 + j < Detail::TBLoadIterationsB0) {
typename IteratorB0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB0::AccessType *>(
this->smem_iterator_B0_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB0::Element>::value *
IteratorB0::ThreadMap::kElementsPerAccess /
IteratorB0::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB0::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B0.get();
cutlass::arch::cp_async<kSrcBytes, kCacheOpB0>(
dst_ptr + v, gmem_ptr, iterator_B0.valid());
++iterator_B0;
}
++this->smem_iterator_B0_;
}
}
}
CUTLASS_DEVICE
void copy_tiles_and_advance_1(IteratorB1 &iterator_B1,
int group_start_B1 = 0) {
iterator_B1.set_iteration_index(group_start_B1 *
IteratorB1::kAccessesPerVector);
this->smem_iterator_B1_.set_iteration_index(group_start_B1);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB1; ++j) {
if (group_start_B1 + j < Detail::TBLoadIterationsB1) {
typename IteratorB1::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB1::AccessType *>(
this->smem_iterator_B1_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value *
IteratorB1::ThreadMap::kElementsPerAccess /
IteratorB1::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B1.get();
cutlass::arch::cp_async<kSrcBytes, kCacheOpB1>(
dst_ptr + v, gmem_ptr, iterator_B1.valid());
++iterator_B1;
}
++this->smem_iterator_B1_;
}
}
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations_0,
///< destination accumulator tile
FragmentC1 &accum,
///< iterator over A0 operand in global memory
IteratorA0 iterator_A0,
///< iterator over B0 operand in global memory
IteratorB0 iterator_B0,
///< iterator over A1 operand scale vector in global memory
IteratorAccumulatorScaleBias iterator_A1_scale,
///< iterator over A1 operand bias vector in global memory
IteratorAccumulatorScaleBias iterator_A1_bias,
///< iterator over B1 operand in global memory
IteratorB1 iterator_B1,
///< initial value of accumulator
FragmentC0 const &src_accum,
///< epilogue operation after 1st Gemm
OutputOp output_op_0)
{
//
// Prologue
//
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations_0) {
iterator_A0.clear_mask(gemm_k_iterations_0 == 0);
iterator_B0.clear_mask(gemm_k_iterations_0 == 0);
iterator_A0.set_iteration_index(0);
this->smem_iterator_A0_.set_iteration_index(0);
// Load for operand A
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsA0; ++j) {
typename IteratorA0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorA0::AccessType *>(
this->smem_iterator_A0_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorA0::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorA0::Element>::value *
IteratorA0::ThreadMap::kElementsPerAccess /
IteratorA0::kAccessesPerVector / 8;
int src_bytes = (iterator_A0.valid() ? kSrcBytes : 0);
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA0>(
dst_ptr + v, iterator_A0.get(), iterator_A0.valid());
++iterator_A0;
}
++this->smem_iterator_A0_;
}
iterator_B0.set_iteration_index(0);
this->smem_iterator_B0_.set_iteration_index(0);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsB0; ++j) {
typename IteratorB0::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB0::AccessType *>(
this->smem_iterator_B0_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB0::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB0::Element>::value *
IteratorB0::ThreadMap::kElementsPerAccess /
IteratorB0::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB0>(
dst_ptr + v, iterator_B0.get(), iterator_B0.valid());
++iterator_B0;
}
++this->smem_iterator_B0_;
}
// Move to the next stage
iterator_A0.add_tile_offset({0, 1});
iterator_B0.add_tile_offset({1, 0});
this->smem_iterator_A0_.add_tile_offset({0, 1});
this->smem_iterator_B0_.add_tile_offset({1, 0});
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// Perform accumulation in the 'd' output operand
FragmentC0 accum0 = src_accum;
    // DEPBAR+SYNC: wait until at most kStages - 2 committed cp.async stages remain
    // outstanding, then synchronize the threadblock
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA0 warp_loaded_frag_A0[2];
WarpLoadedFragmentB0 warp_loaded_frag_B0[2];
WarpTransformedFragmentA0 warp_transformed_frag_A0[2];
WarpTransformedFragmentB0 warp_transformed_frag_B0[2];
Operator0 warp_mma0;
this->warp_tile_iterator_A0_.set_kgroup_index(0);
this->warp_tile_iterator_B0_.set_kgroup_index(0);
this->warp_tile_iterator_A0_.load(warp_loaded_frag_A0[0]);
this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[0]);
++this->warp_tile_iterator_A0_;
++this->warp_tile_iterator_B0_;
iterator_A0.clear_mask(gemm_k_iterations_0 == 0);
iterator_B0.clear_mask(gemm_k_iterations_0 == 0);
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma0.transform(warp_transformed_frag_A0[0], warp_transformed_frag_B0[0],
warp_loaded_frag_A0[0], warp_loaded_frag_B0[0]);
//
// Mainloop
//
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations_0 > (-Base::kStages + 1);) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);
this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);
this->warp_tile_iterator_A0_.load(warp_loaded_frag_A0[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A0_;
++this->warp_tile_iterator_B0_;
if (warp_mma_k > 0)
warp_mma0.transform(warp_transformed_frag_A0[warp_mma_k % 2],
warp_transformed_frag_B0[warp_mma_k % 2],
warp_loaded_frag_A0[warp_mma_k % 2],
warp_loaded_frag_B0[warp_mma_k % 2]);
warp_mma0(
accum0,
warp_transformed_frag_A0[warp_mma_k % 2],
warp_transformed_frag_B0[warp_mma_k % 2],
accum0
);
        // Issue global->shared copies for this stage
if (warp_mma_k < Base::kWarpGemmIterations0 - 1) {
int group_start_iteration_A0, group_start_iteration_B0;
group_start_iteration_A0 = warp_mma_k * Detail::kAccessesPerGroupA0;
group_start_iteration_B0 = warp_mma_k * Detail::kAccessesPerGroupB0;
copy_tiles_and_advance_0(iterator_A0, iterator_B0, group_start_iteration_A0,
group_start_iteration_B0);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations0) {
int group_start_iteration_A0, group_start_iteration_B0;
group_start_iteration_A0 =
(warp_mma_k + 1) * Detail::kAccessesPerGroupA0;
group_start_iteration_B0 =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB0;
copy_tiles_and_advance_0(iterator_A0, iterator_B0, group_start_iteration_A0,
group_start_iteration_B0);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_A0.add_tile_offset({0, 1});
iterator_B0.add_tile_offset({1, 0});
this->smem_iterator_A0_.add_tile_offset({0, 1});
this->smem_iterator_B0_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_A0_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B0_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_A0_.add_tile_offset(
{0, -Base::kStages * Policy0::kPartitionsK *
Base::kWarpGemmIterations0});
this->warp_tile_iterator_B0_.add_tile_offset(
{-Base::kStages * Policy0::kPartitionsK *
Base::kWarpGemmIterations0,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
--gemm_k_iterations_0;
iterator_A0.clear_mask(gemm_k_iterations_0 == 0);
iterator_B0.clear_mask(gemm_k_iterations_0 == 0);
}
// Do any conversions feeding the first stage at the end of the loop so
// we can start right away on mma instructions
if (warp_mma_k + 1 == Base::kWarpGemmIterations0)
warp_mma0.transform(warp_transformed_frag_A0[(warp_mma_k + 1) % 2],
warp_transformed_frag_B0[(warp_mma_k + 1) % 2],
warp_loaded_frag_A0[(warp_mma_k + 1) % 2],
warp_loaded_frag_B0[(warp_mma_k + 1) % 2]);
}
}
    // Commit and drain all pending and predicated cp.async operations from the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
// 2nd Gemm
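    // The A operand of the second GEMM is read directly from the accumulator
    // fragment of the first GEMM (accum0, held in registers), so only the B1
    // operand is staged through shared memory below.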
/// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile
FragmentIteratorA1 warp_tile_iterator_A1_(accum0);
FragmentA1ScaleBias tb_frag_A1_scale;
FragmentA1ScaleBias tb_frag_A1_bias;
FragmentIteratorA1ScaleBias warp_tile_iterator_A1_scale_(tb_frag_A1_scale);
FragmentIteratorA1ScaleBias warp_tile_iterator_A1_bias_(tb_frag_A1_bias);
if(PerChannelScale) {
tb_frag_A1_scale.clear();
iterator_A1_scale.load(tb_frag_A1_scale);
++iterator_A1_scale;
}
tb_frag_A1_bias.clear();
iterator_A1_bias.load(tb_frag_A1_bias);
++iterator_A1_bias;
//
// Prologue
//
int gemm_k_iterations_1 = (FragmentIteratorA1::Policy::kIterations + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1;
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < Base::kStages - 1;
++stage, --gemm_k_iterations_1) {
iterator_B1.clear_mask(gemm_k_iterations_1 == 0);
iterator_B1.set_iteration_index(0);
this->smem_iterator_B1_.set_iteration_index(0);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) {
typename IteratorB1::AccessType *dst_ptr =
reinterpret_cast<typename IteratorB1::AccessType *>(
this->smem_iterator_B1_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB1::Element>::value *
IteratorB1::ThreadMap::kElementsPerAccess /
IteratorB1::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>(
dst_ptr + v, iterator_B1.get(), iterator_B1.valid());
++iterator_B1;
}
++this->smem_iterator_B1_;
}
// Move to the next stage
iterator_B1.add_tile_offset({1, 0});
this->smem_iterator_B1_.add_tile_offset({1, 0});
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
// DEPBAR+SYNC
cutlass::arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA1 warp_loaded_frag_A1[2];
WarpLoadedFragmentA1ScaleBias warp_loaded_frag_A1_scale[2];
WarpLoadedFragmentA1ScaleBias warp_loaded_frag_A1_bias[2];
WarpLoadedFragmentB1 warp_loaded_frag_B1[2];
WarpTransformedFragmentA1 warp_transformed_frag_A1[2];
WarpTransformedFragmentB1 warp_transformed_frag_B1[2];
Operator1 warp_mma1;
if(PerChannelScale) {
warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[0]);
++warp_tile_iterator_A1_scale_;
}
warp_tile_iterator_A1_bias_.load(warp_loaded_frag_A1_bias[0]);
++warp_tile_iterator_A1_bias_;
warp_tile_iterator_A1_.load(warp_loaded_frag_A1[0],
warp_loaded_frag_A1_scale[0],
warp_loaded_frag_A1_bias[0],
output_op_0);
++warp_tile_iterator_A1_;
this->warp_tile_iterator_B1_.set_kgroup_index(0);
this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[0]);
++this->warp_tile_iterator_B1_;
iterator_B1.clear_mask(gemm_k_iterations_1 == 0);
smem_write_stage_idx = Base::kStages - 1;
smem_read_stage_idx = 0;
warp_mma1.transform(warp_transformed_frag_A1[0], warp_transformed_frag_B1[0],
warp_loaded_frag_A1[0], warp_loaded_frag_B1[0]);
//
// Mainloop
//
gemm_k_iterations_1 = (FragmentIteratorA1::Policy::kIterations + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1 - (Base::kStages - 1);
CUTLASS_PRAGMA_UNROLL
for (; gemm_k_iterations_1 > (-Base::kStages + 1); gemm_k_iterations_1--) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1;
++warp_mma_k) {
// Load threadblock-level scale/bias vector from global memory
if (warp_mma_k + 1 == Base::kWarpGemmIterations1) {
if(PerChannelScale) {
tb_frag_A1_scale.clear();
iterator_A1_scale.load(tb_frag_A1_scale);
++iterator_A1_scale;
}
tb_frag_A1_bias.clear();
iterator_A1_bias.load(tb_frag_A1_bias);
++iterator_A1_bias;
}
// Load warp-level scale bias fragment from threadblock scale/bias vector
if(PerChannelScale) {
warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]);
++warp_tile_iterator_A1_scale_;
}
warp_tile_iterator_A1_bias_.load(warp_loaded_frag_A1_bias[(warp_mma_k + 1) % 2]);
++warp_tile_iterator_A1_bias_;
// Load warp-level tile from accumulator fragment
warp_tile_iterator_A1_.load(warp_loaded_frag_A1[(warp_mma_k + 1) % 2],
warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2],
warp_loaded_frag_A1_bias[(warp_mma_k + 1) % 2],
output_op_0);
++warp_tile_iterator_A1_;
        // Load warp-level tiles from shared memory, wrapping to the k offset
        // if this is the last group.
this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1);
this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_B1_;
if (warp_mma_k > 0)
warp_mma1.transform(warp_transformed_frag_A1[warp_mma_k % 2],
warp_transformed_frag_B1[warp_mma_k % 2],
warp_loaded_frag_A1[warp_mma_k % 2],
warp_loaded_frag_B1[warp_mma_k % 2]);
warp_mma1(
accum,
warp_transformed_frag_A1[warp_mma_k % 2],
warp_transformed_frag_B1[warp_mma_k % 2],
accum
);
        // Issue global->shared copies for this stage
if (warp_mma_k < Base::kWarpGemmIterations1 - 1) {
int group_start_iteration_B1;
group_start_iteration_B1 = warp_mma_k * Detail::kAccessesPerGroupB1;
copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1);
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations1) {
int group_start_iteration_B1;
group_start_iteration_B1 =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB1;
copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1);
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages have committed.
arch::cp_async_wait<Base::kStages - 2>();
__syncthreads();
// Move to the next stage
iterator_B1.add_tile_offset({1, 0});
this->smem_iterator_B1_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_B1_.add_tile_offset(
{-Base::kStages * Policy1::kPartitionsK *
Base::kWarpGemmIterations1,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
iterator_B1.clear_mask(gemm_k_iterations_1 == 1);
}
// Do any conversions feeding the first stage at the end of the loop so
// we can start right away on mma instructions
if (warp_mma_k + 1 == Base::kWarpGemmIterations1)
warp_mma1.transform(warp_transformed_frag_A1[(warp_mma_k + 1) % 2],
warp_transformed_frag_B1[(warp_mma_k + 1) % 2],
warp_loaded_frag_A1[(warp_mma_k + 1) % 2],
warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
}
}
    // Commit and drain all pending and predicated cp.async operations from the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/threadblock/b2b_mma_multistage.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/threadblock/b2b_mma_multistage.h",
"repo_id": "examples",
"token_count": 15041
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
  The convolution version of 12_gemm_bias_relu. Similarly, we place the bias vector in
  operand C and the rest is the same as a normal convolution.
*/
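//
// In scalar form, the computation checked by the host reference at the end of
// run() is (alpha defaults to 1 in this example):
//
//   D[n][p][q][k] = max(0, alpha * Conv2dFprop(A, B)[n][p][q][k] + bias[k])
//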
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 4;
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue, // Data type for alpha in linear combination
cutlass::epilogue::thread::ScaleType::NoBetaScaling>; // alpha X C + per channel bias
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
int run() {
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
{1, 7, 7, 512}, // activation
{512, 3, 3, 512}, // filter
{1, 1, 1, 1}, // padding
{1, 1}, // striding
{1, 1}, // dilation
cutlass::conv::Mode::kCrossCorrelation, // mode (convolution or cross-correlation)
1 // split-k slices
);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(problem_size.activation_extent());
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(problem_size.filter_extent());
// Create tensor C with dimensions 1x1x1xk which is the bias vector
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias({1, 1, 1, problem_size.K});
// Create tensor D used to store output from CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(problem_size.output_extent());
  // Create tensor D used to store output from the reference kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(problem_size.output_extent());
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_bias.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c_bias.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(), // <- reference to tensor A on device
tensor_b.device_ref(), // <- reference to tensor B on device
    // tensor C is treated as the bias vector. Setting the NHWC strides to zero
    // broadcasts the same K-element bias row across the N, H, W dimensions.
{tensor_c_bias.device_data(), LayoutOutput::Stride(0)},
tensor_d.device_ref(), // <- reference to tensor D on device
{alpha} };
// Instantiate CUTLASS kernel depending on templates
ImplicitGemm implicit_gemm_op;
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check the problem size is supported or not
cutlass::Status status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = implicit_gemm_op();
CUTLASS_CHECK(status);
//
// Create instantiation for device reference conv kernel
//
  // Launch the device reference kernel; beta is zero here, so only alpha * conv(A, B)
  // is computed and bias + ReLU are applied on the host below.
cutlass::reference::device::Conv2d<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>>
(
cutlass::conv::Operator::kFprop,
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c_bias.device_ref(),
tensor_ref_d.device_ref(),
alpha, ElementComputeEpilogue(0)
);
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Compute bias + relu in host code
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
tensor_ref_d.at({n, p, q, k}) =
std::max(ElementOutput(0),
ElementOutput(tensor_ref_d.at({n, p, q, k}) +
tensor_c_bias.at({0, 0, 0, k})));
}
}
}
}
// Check if output from CUTLASS kernel and reference kernel are equal or not
std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(),
tensor_ref_d.host_view())
? "Passed"
: "Failed")
<< std::endl;
CUTLASS_CHECK(status);
return 0;
}
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
return run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/17_fprop_per_channel_bias/fprop_per_channel_bias.cu/0 | {
"file_path": "examples/17_fprop_per_channel_bias/fprop_per_channel_bias.cu",
"repo_id": "examples",
"token_count": 4569
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to fuse per channel scale+bias+relu of the activations
into the 3D fprop mainloop.
Compared with original 3D fprop kernel, this example has two more vectors, one for
the scale and one for the bias. The length of the vectors is the same as the
activation channel number. This kernel loads the vectors when the associated
activation channels are loaded in the mainloop. Between reading the
activations and scale/bias data from the shared memory and calling tensor core
instructions, scale+bias+relu is computed in the register file.
This example is customized for Ampere 16816 fp16 tensor core instruction.
  Changing to different data types or a different tensor core instruction requires
  source code changes. See
include/cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h for more
technical details.
  This example is adapted from 25_ampere_fprop_mainloop_fusion. The command
  line is the same.
*/
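//
// In scalar form, the transform fused into the mainloop is (this matches the
// host reference check in profile_convolution()):
//
//   x[n][d][h][w][c] = max(0, x[n][d][h][w][c] * scale[c] + bias[c])
//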
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv3d_fprop_fusion.h"
#include "cutlass/conv/device/implicit_gemm_convolution_fusion.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementInputScaleBias = cutlass::half_t;    // Data type of elements in input scale and bias vectors
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNDHWC;
using LayoutInputB = cutlass::layout::TensorNDHWC;
using LayoutInputScaleBias = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::TensorNDHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 4;
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
using Conv3dFpropFusionKernel = typename cutlass::conv::kernel::DefaultConv3dFpropFusion<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementInputScaleBias, LayoutInputScaleBias,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemmFusion = cutlass::conv::device::ImplicitGemmConvolutionFusion<Conv3dFpropFusionKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor5DCoord input_size;
cutlass::Tensor5DCoord filter_size;
cutlass::Coord<3> padding;
cutlass::Coord<3> conv_stride;
cutlass::Coord<3> dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32, 32),
filter_size(32, 3, 3, 3, 32),
padding(cutlass::make_Coord(1, 1, 1)),
conv_stride(cutlass::make_Coord(1, 1, 1)),
dilation(cutlass::make_Coord(1, 1, 1)),
reference_check(true),
measure_performance(false),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding[0] != filter_size.d() / 2) ||
(padding[1] != filter_size.h() / 2) ||
(padding[2] != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor5DCoord input_size,
cutlass::Tensor5DCoord filter_size,
cutlass::Coord<3> stride) {
this->input_size = input_size;
this->filter_size = filter_size;
conv_stride = stride;
padding[0] = filter_size.d() / 2;
padding[1] = filter_size.h() / 2;
padding[2] = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("d", input_size.d());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("t", filter_size.d());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.d() == 3 && filter_size.h() == 3 && filter_size.w() == 3) {
padding = cutlass::make_Coord(1, 1, 1);
}
else {
filter_size.d() = 1;
filter_size.h() = 1;
filter_size.w() = 1;
padding = cutlass::make_Coord(0, 0, 0);
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "25_ampere_3d_fprop_mainloop_fusion example\n\n"
<< " This example fuses scale+bias+relu of the activations into Ampere's\n"
<< " Tensor Core operators on F16 data types to compute\n"
<< " forward convolution on tensors of layout NDHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n <int> Input tensor extent N\n"
<< " --d <int> Input tensor extent D\n"
<< " --h <int> Input tensor extent H\n"
<< " --w <int> Input tensor extent W\n"
<< " --c <int> Input tensor extent C\n"
<< " --k <int> Filter extent K\n"
<< " --t <int> Filter extent T\n"
<< " --r <int> Filter extent R\n"
<< " --s <int> Filter extent S\n\n"
<< " --alpha <float> Epilogue scalar alpha\n"
<< " --beta <float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations <int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag <string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./25_ampere_3d_fprop_mainloop_fusion --n=32 --d=96 --h=96 --w=96 --c=64 --k=64 --t=1 --r=1 --s=1\n\n"
<< "$ ./25_ampere_3d_fprop_mainloop_fusion --n=1 --d=224 --h=224 --w=224 --c=32 --k=32 --t=3 --r=3 --s=3 --ref-check\n\n"
<< "$ ./25_ampere_3d_fprop_mainloop_fusion --n=19 --d=94 --h=96 --w=96 --c=128 --k=128 --t=1 --r=1 --s=1\n\n";
return out;
}
  /// Computes the output tensor size (NZPQK)
cutlass::Tensor5DCoord output_size() const {
return cutlass::Tensor5DCoord(
input_size.n(),
(input_size.d() + padding[0] + padding[0] - filter_size.d()) / conv_stride[0] + 1,
(input_size.h() + padding[1] + padding[1] - filter_size.h()) / conv_stride[1] + 1,
(input_size.w() + padding[2] + padding[2] - filter_size.w()) / conv_stride[2] + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
    // Number of multiply-adds = (N * Z * P * Q * K) * (T * R * S * C)
int64_t fmas = output_size().product() * int64_t(filter_size.d() * filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,D,H,W,C,K,T,R,S,Stride_D,Stride_H,Stride_W,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.d() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.d() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< options.conv_stride[0] << ","
<< options.conv_stride[1] << ","
<< options.conv_stride[2] << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_transformed_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
tensor_a_scale({1, options.input_size.c()});
cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
tensor_a_bias({1, options.input_size.c()});
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill scale vector for tensor A on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a_scale.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill bias vector for tensor A on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a_bias.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(7),
ElementOutput(-8),
0);
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_a_scale.sync_device();
tensor_a_bias.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Construct Conv3dProblemSize with user defined output size
cutlass::conv::Conv3dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
typename ImplicitGemmFusion::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_a_scale.device_ref(),
tensor_a_bias.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemmFusion implicit_gemm_fusion_op;
size_t workspace_size = implicit_gemm_fusion_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_fusion_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_fusion_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_fusion_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on device...\n";
// Compute scale + bias + relu in host code
for (int n = 0; n < options.input_size.n(); ++n) {
for (int d = 0; d < options.input_size.d(); ++d) {
for (int h = 0; h < options.input_size.h(); ++h) {
for (int w = 0; w < options.input_size.w(); ++w) {
for (int c = 0; c < options.input_size.c(); ++c) {
tensor_transformed_a.at({n, d, h, w, c}) = std::max(
ElementOutput(0), ElementOutput(tensor_a.at({n, d, h, w, c}) *
tensor_a_scale.at({0, c}) +
tensor_a_bias.at({0, c})));
}
}
}
}
}
tensor_transformed_a.sync_device();
// Compute with reference implementation
cutlass::reference::device::Conv3dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_transformed_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_ref_d.device_ref(),
options.alpha,
options.beta
);
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_d.sync_host();
tensor_ref_d.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "25_ampere_3d_fprop_mainloop_fusion"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_fusion_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv3dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "This test must run on SM80 or above.\n";
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {34, 18};
struct Benchmark {
int d, h, w, c, k, t, r, s, stride_d, stride_h, stride_w;
} layers[] = {
{56, 56, 56, 64, 256, 1, 1, 1, 1, 1, 1},
{56, 56, 56, 64, 64, 1, 1, 1, 1, 1, 1},
{56, 56, 56, 64, 64, 3, 3, 3, 1, 1, 1},
{56, 56, 56, 256, 64, 1, 1, 1, 1, 1, 1},
{56, 56, 56, 256, 512, 1, 1, 1, 2, 2, 2},
{56, 56, 56, 256, 128, 1, 1, 1, 1, 1, 1},
{56, 56, 56, 128, 128, 3, 3, 3, 2, 2, 2},
{28, 28, 28, 128, 512, 1, 1, 1, 1, 1, 1},
{28, 28, 28, 512, 128, 1, 1, 1, 1, 1, 1},
{28, 28, 28, 128, 128, 3, 3, 3, 1, 1, 1},
{28, 28, 28, 512, 1024, 1, 1, 1, 2, 2, 2},
{28, 28, 28, 512, 256, 1, 1, 1, 1, 1, 1},
{28, 28, 28, 256, 256, 3, 3, 3, 2, 2, 2},
{14, 14, 14, 256, 1024, 1, 1, 1, 1, 1, 1},
{14, 14, 14, 1024, 256, 1, 1, 1, 1, 1, 1},
{14, 14, 14, 256, 256, 3, 3, 3, 1, 1, 1},
{14, 14, 14, 1024, 2048, 1, 1, 1, 2, 2, 2},
{14, 14, 14, 1024, 512, 1, 1, 1, 1, 1, 1},
{14, 14, 14, 512, 512, 3, 3, 3, 2, 2, 2},
{ 7, 7, 7, 512, 2048, 1, 1, 1, 1, 1, 1},
{ 7, 7, 7, 2048, 512, 1, 1, 1, 1, 1, 1},
{ 7, 7, 7, 512, 512, 3, 3, 3, 1, 1, 1},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.d, layer.h, layer.w, layer.c},
{layer.k, layer.t, layer.r, layer.s, layer.c},
cutlass::make_Coord(layer.stride_d, layer.stride_h, layer.stride_w));
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/25_ampere_fprop_mainloop_fusion/ampere_3d_fprop_mainloop_fusion.cu/0 | {
"file_path": "examples/25_ampere_fprop_mainloop_fusion/ampere_3d_fprop_mainloop_fusion.cu",
"repo_id": "examples",
"token_count": 10476
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/arch/mma.h"
////////////////////////////////////////////////////////////////////////////////
// Some helper functions
////////////////////////////////////////////////////////////////////////////////
#define DISPATCH_TYPES(tensor, func) \
{ \
if (query.scalar_type() == at::ScalarType::Float) { \
using scalar_t = float; \
func(); \
} else if (query.scalar_type() == at::ScalarType::Half) { \
using scalar_t = cutlass::half_t; \
func(); \
} else if (query.scalar_type() == at::ScalarType::BFloat16) { \
using scalar_t = cutlass::bfloat16_t; \
func(); \
} else { \
XFORMERS_CHECK(false, "Only fp32, half & bf16 supported at the moment"); \
} \
}
#define DISPATCH_BOOL(BOOL_V, BOOL_NAME, F) \
{ \
if (BOOL_V) { \
constexpr bool BOOL_NAME = true; \
F(); \
} else { \
constexpr bool BOOL_NAME = false; \
F(); \
} \
}
#define DISPATCH_ARCHTAG(CC, func) \
{ \
if (CC >= 80) { \
using ArchTag = cutlass::arch::Sm80; \
func(); \
} else if (CC >= 75) { \
using ArchTag = cutlass::arch::Sm75; \
func(); \
} else if (CC >= 70) { \
using ArchTag = cutlass::arch::Sm70; \
func(); \
} else if (CC >= 50) { \
using ArchTag = cutlass::arch::Sm50; \
func(); \
} else { \
XFORMERS_CHECK( \
false, \
"Your device is too old. We require compute capability >= 50"); \
} \
}
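// Usage sketch for the dispatch macros above (illustrative only; `p`,
// `compute_capability`, and `launch_attention_kernel` are hypothetical names):
//
//   DISPATCH_TYPES(query, ([&] {
//     DISPATCH_ARCHTAG(compute_capability, ([&] {
//       launch_attention_kernel<scalar_t, ArchTag>(p);
//     }));
//   }));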
#define CHECK_NOSPARSE_CONTIGUOUS_CUDA(TENSOR) \
XFORMERS_CHECK(TENSOR.is_cuda(), #TENSOR " must be a CUDA tensor"); \
XFORMERS_CHECK(!TENSOR.is_sparse(), #TENSOR " must be a dense tensor"); \
XFORMERS_CHECK(TENSOR.is_contiguous());
#define CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(TENSOR) \
XFORMERS_CHECK(TENSOR.is_cuda(), #TENSOR " must be a CUDA tensor"); \
XFORMERS_CHECK(!TENSOR.is_sparse(), #TENSOR " must be a dense tensor"); \
XFORMERS_CHECK( \
TENSOR.stride(-1) == 1, #TENSOR ": last dimension must be contiguous");
#ifdef TORCH_CHECK
#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \
XFORMERS_CHECK( \
uint64_t(PTR) % ALIGNMENT == 0, #PTR " is not correctly aligned")
#define XFORMERS_CHECK TORCH_CHECK
#elif defined(__CUDACC_RTC__)
#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \
if (!(uint64_t(PTR) % ALIGNMENT == 0)) { \
return false; \
}
#define XFORMERS_CHECK(COND, ERR) \
if (!(COND)) { \
return false; \
}
#else
#include <iostream>
#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \
if (!(uint64_t(PTR) % ALIGNMENT == 0)) { \
std::cerr << #PTR " is not correctly aligned\n"; \
return false; \
}
#define XFORMERS_CHECK(COND, ERR) \
if (!(COND)) { \
std::cerr << "'" #COND "' failed: " << ERR << "\n"; \
return false; \
}
#endif
#define ASSIGN_CHECK_OVERFLOW(A, B) \
{ \
A = B; \
XFORMERS_CHECK( \
B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \
}
namespace gemm_kernel_utils {
template <typename integer>
constexpr CUTLASS_HOST_DEVICE integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
template <typename integer>
constexpr CUTLASS_HOST_DEVICE integer align_up(integer n, integer m) {
return ((n + m - 1) / m) * m;
}
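// For example, ceil_div(7, 4) == 2 and align_up(7, 4) == 8.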
////////////////////////////////////////////////////////////////////////////////
// Determine the type of GEMM we do (TensorCores or not, Shapes ...)
// TODO: Maybe we could rely on Cutlass's DefaultGemm templates
////////////////////////////////////////////////////////////////////////////////
// Fallback to Simt (FMA on cuda cores) if not in a special case below
template <typename ArchTag, typename scalar_t_, typename Enable = void>
struct DefaultGemmType {
static constexpr int ThreadK = 8;
static constexpr int WarpK = 8;
static constexpr int kMinimumAlignment = 1;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using OpClass = cutlass::arch::OpClassSimt;
using Operator = cutlass::arch::OpMultiplyAdd;
};
// Specialization for tensorcores with f32
template <typename ArchTag>
struct DefaultGemmType<
ArchTag,
float,
typename cutlass::platform::enable_if<
ArchTag::kMinComputeCapability >= 80>::type> {
static constexpr int ThreadK = 32;
static constexpr int WarpK = 32;
static constexpr int kMinimumAlignment = 4;
using OpClass = cutlass::arch::OpClassTensorOp;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Operator = cutlass::arch::OpMultiplyAddFastF32;
};
// Specialization for tensorcores with f16/bf16 - Sm75+
template <typename ArchTag, typename scalar_t>
struct DefaultGemmType<
ArchTag,
scalar_t,
typename cutlass::platform::enable_if<
ArchTag::kMinComputeCapability >= 75 &&
cutlass::sizeof_bits<scalar_t>::value == 16>::type> {
static constexpr int ThreadK = 32;
static constexpr int WarpK = 32;
static constexpr int kMinimumAlignment = 4;
using OpClass = cutlass::arch::OpClassTensorOp;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Operator = cutlass::arch::OpMultiplyAdd;
};
// Specialization for tensorcores with f16 - Volta
template <>
struct DefaultGemmType<cutlass::arch::Sm70, cutlass::half_t, void> {
static constexpr int ThreadK = 32;
static constexpr int WarpK = 32;
static constexpr int kMinimumAlignment = 2;
using OpClass = cutlass::arch::OpClassTensorOp;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Operator = cutlass::arch::OpMultiplyAdd;
};
// Enables writing
// `auto x = kCondition ? fa(arg) : fb(arg)`
// when `fa(arg)` and `fb(arg)` have different types
template <bool kVal, typename TA, typename TB>
struct call_conditional;
template <typename TA, typename TB>
struct call_conditional<true, TA, TB> {
template <typename Arg>
static CUTLASS_HOST_DEVICE auto apply(TA ta, TB tb, Arg arg)
-> decltype(ta(arg)) {
return ta(arg);
}
};
template <typename TA, typename TB>
struct call_conditional<false, TA, TB> {
template <typename Arg>
static CUTLASS_HOST_DEVICE auto apply(TA ta, TB tb, Arg arg)
-> decltype(tb(arg)) {
return tb(arg);
}
};
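// Usage sketch (illustrative; `fa` and `fb` are hypothetical callables whose
// return types differ):
//
//   auto x = call_conditional<kCondition, decltype(fa), decltype(fb)>::apply(
//       fa, fb, arg);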
////////////////////////////////////////////////////////////////////////////////
// Mark a variable as warp-uniform - enables some compiler optimizations
// The cheapest way to do it is just to broadcast it from lane 0
////////////////////////////////////////////////////////////////////////////////
template <typename T>
CUTLASS_DEVICE T warp_uniform(T value) {
struct {
union {
T value;
uint32_t asInt;
};
} p;
p.value = value;
p.asInt = __shfl_sync(0xffffffff, (unsigned)p.asInt, 0);
return p.value;
}
template <typename T>
CUTLASS_DEVICE T* warp_uniform(T* ptr) {
struct {
union {
T* ptr;
uint32_t asInt[2];
};
} p;
p.ptr = ptr;
p.asInt[0] = warp_uniform(p.asInt[0]);
p.asInt[1] = warp_uniform(p.asInt[1]);
return p.ptr;
}
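// Usage sketch (illustrative): a value that is identical across all lanes of a
// warp, such as a tile base pointer, can be wrapped so the compiler treats it
// as lane-invariant:
//
//   auto* tile_ptr = warp_uniform(base_ptr + tile_offset);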
} // namespace gemm_kernel_utils
| examples/41_fused_multi_head_attention/gemm_kernel_utils.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/gemm_kernel_utils.h",
"repo_id": "examples",
"token_count": 5441
} | 6 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
import gen_ir as ir
class gen_turing_impl:
def __init__(self,fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.class_name = gen_class_name
self.gen_class_name = gen_class_name + "_turing_impl"
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
self.gen_turing_unfused = gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
def gen_using(self):
code_using = "using b2b_gemm = typename cutlass::gemm::device::" + self.class_name + "<cutlass::half_t>;"
return code_using + "\n"
def gen_initialize(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " alpha", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(1);\n"
beta = "(1)"
if helper.get_epilogue_add_bias_or_not(self.fuse_gemm_info[i]) is False:
beta = "(0)"
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " beta", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + beta + ";\n"
k_str = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
k_str = "K0"
code_this += helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(M, " + str(self.fuse_gemm_info[i]['mnk'][1]) + ", " + k_str + ");\n"
code += code_this
code += "typename b2b_gemm::Arguments arguments{\n"
for i in range(self.b2b_num):
code += " " + helper.var_idx("problem_size_", i) + ",\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("A", 0) + "), " + helper.var_idx("problem_size_", 0) + ".k()},\n"
for i in range(self.b2b_num):
ldmB = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
ldmB = "K0"
            if self.fuse_gemm_info[i]['B_format'] == 'Row':
ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
ldmC = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("B", i) + "), " + ldmB + "},\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("C", i) + "), " + ldmC + "},\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("D", self.b2b_num -1) + "), " + helper.var_idx("problem_size_", self.b2b_num - 1) + ".n()},\n"
for i in range(self.b2b_num):
code += " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_name = helper.var_idx("Epilogue", i) + "_" + epilogue_arg[1]
code += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_name) + ")"
code += "},\n"
code += " " + "Batch};\n\n"
code += " " "b2b_gemm gemm_op;\n"
code += " " + "gemm_op.initialize(arguments);\n"
return code + "\n"
def gen_run(self):
code = " " + "gemm_op(stream);\n"
return code
def gen_wrapper(self):
code_body = ""
arg_lists = []
arg_lists.append(["int", "M"])
arg_lists.append(["int", "K0"])
arg_lists.append(["int", "Batch"])
arg_lists.append(["void*", helper.var_idx("A", 0)])
for i in range(self.b2b_num):
arg_lists.append(["void*", helper.var_idx("B", i)])
arg_lists.append(["void*", helper.var_idx("C", i)])
arg_lists.append(["void*", helper.var_idx("D", i)])
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
arg_lists.append([arg_tp, arg_name])
if self.b2b_num == 1:
code_body += self.gen_turing_unfused.gen_using(False) #False -> Turing, True -> Volta
code_body += self.gen_turing_unfused.gen_initialize()
code_body += self.gen_turing_unfused.gen_run()
else:
code_body += self.gen_using()
code_body += self.gen_initialize()
code_body += self.gen_run()
code = ir.gen_func(self.gen_class_name, arg_lists, code_body)
return code
def gen_code(self):
code = self.gen_wrapper()
helper.write_2_headfile("turing_impl.h", self.output_dir, self.user_header_file + "\n" + code)
class gen_volta_turing_fuse_act_impl:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name + "_volta_impl"
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
def perf_tiling(self, layer_mnk):
mnk = layer_mnk[:]
block_tile = mnk[:]
block_tile[2] = 32 # force the K tile to be 32
# M tile gen
block_tile[0] = 32
# N tile gen
if mnk[1] > 128:
block_tile[1] = 256
elif mnk[1] > 64:
block_tile[1] = 128
elif mnk[1] > 32:
block_tile[1] = 64
        else:
block_tile[1] = 32
warp_tile = block_tile[:]
if block_tile[1] == 256:
warp_tile[1] = 64
elif block_tile[1] == 128:
warp_tile[1] = 32
elif block_tile[1] == 64:
warp_tile[1] = 32
        else:
warp_tile[1] = 32
warp_tile[0] = 32
return block_tile, warp_tile
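    # Illustrative example (not part of the original source): for a layer with
    # mnk = [1024, 256, 64], perf_tiling returns block_tile = [32, 256, 32]
    # and warp_tile = [32, 64, 32].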
def process_epilogue(self, epilogue_tp, n, C_tp, Acc_tp):
epilogue_setted_type = epilogue_tp
cutlass_epilogue_name = "LinearCombinationRelu"
if epilogue_setted_type.lower() == 'leakyrelu':
cutlass_epilogue_name = "LinearCombinationLeakyRelu"
elif epilogue_setted_type.lower() == 'identity':
cutlass_epilogue_name = "LinearCombination"
        # Output-vector alignment (in elements) is determined by N modulo 8.
        n_mod_8 = n % 8
N_align_elements = 1
if n_mod_8 == 0:
N_align_elements = 8
elif n_mod_8 == 4:
N_align_elements = 4
elif n_mod_8 == 2 or n_mod_8 == 6:
N_align_elements = 2
epilogue_str = "cutlass::epilogue::thread::" + cutlass_epilogue_name+ "<" + C_tp + ", " + str(N_align_elements) + ", " + Acc_tp + ", " + Acc_tp + ">"
return epilogue_str
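    # Illustrative example (not part of the original source):
    # process_epilogue('identity', 64, 'cutlass::half_t', 'float') yields
    # "cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>".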
def gen_using(self, volta = True):
code_using = ""
volta_arch = "cutlass::arch::Sm70"
volta_tc = "cutlass::gemm::GemmShape<8, 8, 4>"
turing_arch = "cutlass::arch::Sm75"
turing_tc = "cutlass::gemm::GemmShape<16, 8, 8>"
arch = ""
tc = ""
if volta:
arch = volta_arch
tc = volta_tc
else:
arch = turing_arch
tc = turing_tc
for i in range(self.b2b_num):
k = self.fuse_gemm_info[i]['mnk'][2]
            # A/B operand alignment (in elements) is determined by K modulo 8.
            k_mod_8 = k % 8
ab_ldm = 1
if k_mod_8 == 0:
ab_ldm = 8
elif k_mod_8 == 4:
ab_ldm = 4
elif k_mod_8 == 2 or k_mod_8 == 6:
ab_ldm = 2
block_tile, warp_tile = self.perf_tiling(self.fuse_gemm_info[i]['mnk'])
this_gemm_config = helper.var_idx("using Gemm", i) + " = cutlass::gemm::device::GemmBatched<\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + ",\n"
this_gemm_config += " " + "cutlass::arch::OpClassTensorOp,\n"
this_gemm_config += " " + arch + ",\n"
this_gemm_config += " " + "cutlass::gemm::GemmShape<" + str(block_tile[0]) + ", " + str(block_tile[1]) + ", " + str(block_tile[2]) + ">,\n"
this_gemm_config += " " + "cutlass::gemm::GemmShape<" + str(warp_tile[0]) + ", " + str(warp_tile[1]) + ", " + str(warp_tile[2]) + ">,\n"
this_gemm_config += " " + tc + ",\n"
this_gemm_config += " " + self.process_epilogue(helper.get_epilogue_tp(self.fuse_gemm_info[i]), self.fuse_gemm_info[i]['mnk'][1], helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']), helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp'])) + ",\n"
this_gemm_config += " " + "cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,\n"
this_gemm_config += " " + "2,\n"
this_gemm_config += " " + str(ab_ldm) + ",\n"
this_gemm_config += " " + str(ab_ldm) + ">;\n"
code_using += this_gemm_config + "\n"
return code_using + "\n"
def gen_initialize(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
N_str = str(self.fuse_gemm_info[i]['mnk'][1])
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " alpha", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(1);\n"
beta = "(1)"
if helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i]) is False:
beta = "(0)"
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " beta", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + beta + ";\n"
k_str = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
k_str = "K0"
code_this += helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(M, " + str(self.fuse_gemm_info[i]['mnk'][1]) + ", " + k_str + ");\n"
code_this += helper.var_idx("typename Gemm", i) + helper.var_idx("::Arguments arguments_", i) + "{\n"
code_this += " " + helper.var_idx("problem_size_", i) + ",\n"
ldmA = k_str
ldmB = k_str
ldmC = str(self.fuse_gemm_info[i]['mnk'][1])
ldmBias = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
            if self.fuse_gemm_info[i]['A_format'] == 'Col':
                ldmA = "M"
            if self.fuse_gemm_info[i]['B_format'] == 'Row':
                ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
            if self.fuse_gemm_info[i]['C_format'] == 'Col':
                ldmC = "M"
if i == 0:
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("A", i) + "), " + ldmA + "}, " + "M * " + ldmA + ",\n"
else:
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("D", i - 1) + "), " + ldmA + "}, " + "M * " + ldmA + ",\n"
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("B", i) + "), " + ldmB + "}, " + N_str + " * " + ldmB + ",\n"
M_bias = str(helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])[0])
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("C", i) + "), " + ldmBias + "}, " + M_bias + " * " + N_str + ",\n"
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("D", i) + "), " + ldmC + "}, " + "M * " + ldmC + ",\n"
code_this += " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_name = helper.var_idx("Epilogue", i) + "_" + epilogue_arg[1]
code_this += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_name) + ")"
code_this += " },\n"
code_this += " " + "Batch};\n"
code_this += " " + helper.var_idx("Gemm", i) + helper.var_idx(" gemm_op_", i) + ";\n"
code_this += " " + helper.var_idx("gemm_op_", i) + helper.var_idx(".initialize(arguments_", i) + ", nullptr);\n"
code += code_this + "\n"
return code + "\n"
def gen_run(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
code_this += " " + helper.var_idx("gemm_op_", i) + "(stream);\n"
code += code_this
return code
def gen_wrapper(self):
code_body = ""
arg_lists = []
arg_lists.append(["int", "M"])
arg_lists.append(["int", "K0"])
arg_lists.append(["int", "Batch"])
arg_lists.append(["void*", helper.var_idx("A", 0)])
for i in range(self.b2b_num):
arg_lists.append(["void*", helper.var_idx("B", i)])
arg_lists.append(["void*", helper.var_idx("C", i)])
arg_lists.append(["void*", helper.var_idx("D", i)])
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
arg_lists.append([arg_tp, arg_name])
code_body += self.gen_using()
code_body += self.gen_initialize()
code_body += self.gen_run()
code = ir.gen_func(self.gen_class_name, arg_lists, code_body)
return code
def gen_code(self):
code = self.gen_wrapper()
helper.write_2_headfile("volta_impl.h", self.output_dir, self.user_header_file + "\n" + code)
class gen_one_API:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
self.gen_volta = gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
self.gen_turing = gen_turing_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
def gen_CUTLASS_irrelevant_API(self):
code = ""
code += "#include <cuda_runtime.h>\n"
code += "#include <assert.h>\n"
param_name = "Fused" + str(self.b2b_num) + "xGemm_"
for i in range(self.b2b_num):
param_name += str(self.fuse_gemm_info[i]['mnk'][1]) + "_"
param_name += "Params"
params = ""
params += " " + "int M;\n"
params += " " + "int K0;\n"
params += " " + "int Batch;\n"
params += " " + "const void* A0;\n"
for i in range(self.b2b_num):
params += " " + "const void* " + helper.var_idx("B", i) + ";\n"
params += " " + "const void* " + helper.var_idx("C", i) + ";\n"
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
params += " " + arg_tp + " " + arg_name + ";\n"
params += " " + "void* " + helper.var_idx("D", i) + ";\n"
code += ir.gen_struct(param_name, params)
code += "using Param = " + param_name + ";\n"
code += "void one_api( const Param & param, int sm, cudaStream_t stream);\n"
return code
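    # Illustrative output (hypothetical single-GEMM chain with N = 64): the
    # generated header declares roughly
    #   struct Fused1xGemm_64_Params {
    #     int M; int K0; int Batch;
    #     const void* A0; const void* B0; const void* C0; /* epilogue args */ void* D0;
    #   };
    #   using Param = Fused1xGemm_64_Params;
    #   void one_api( const Param & param, int sm, cudaStream_t stream);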
def gen_one_api(self):
code = ""
code += "/* Auto Generated code - Do not edit.*/\n"
code += "#include \"cutlass_irrelevant.h\"\n"
code += "#include \"api.h\"\n"
code += "void one_api( const Param & param, int sm, cudaStream_t stream) {\n"
code += " " + "if (sm == 70) \n"
code += " " + " " + self.gen_class_name + "_volta_impl(param.M, param.K0, param.Batch, const_cast<void*>(param.A0), "
for i in range(self.b2b_num):
code += helper.var_idx("const_cast<void*>(param.B", i) + "), "
code += helper.var_idx("const_cast<void*>(param.C", i) + "), "
code += helper.var_idx("param.D", i) + ", "
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
code += "param." + arg_name + ", "
code += "stream);\n"
code += " " + "else if(sm >= 75) \n"
code += " " + " " + self.gen_class_name + "_turing_impl(param.M, param.K0, param.Batch, const_cast<void*>(param.A0), "
for i in range(self.b2b_num):
code += helper.var_idx("const_cast<void*>(param.B", i) + "), "
code += helper.var_idx("const_cast<void*>(param.C", i) + "), "
code += helper.var_idx("param.D", i) + ", "
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
code += "param." + arg_name + ", "
code += "stream);\n"
code += " " + "else assert(0);\n"
code += "}\n"
return code
def gen_code(self):
turing_code = self.gen_turing.gen_wrapper()
volta_code = self.gen_volta.gen_wrapper()
cutlass_irrelevant_code = self.gen_CUTLASS_irrelevant_API()
one_api_code = self.gen_one_api()
with open(self.output_dir + "one_api.cu", "w+") as f:
f.write(one_api_code)
helper.write_2_headfile("cutlass_irrelevant.h", self.output_dir, cutlass_irrelevant_code)
helper.write_2_headfile("api.h", self.output_dir, self.user_header_file + "\n" + turing_code + volta_code)
| examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_turing_and_volta.py/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_turing_and_volta.py",
"repo_id": "examples",
"token_count": 10964
} | 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/threadblock/mma_base.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Base structure for a dual threadblock-scoped matrix product: it owns the
/// shared-memory buffers for the A, B0, and B1 operands and the warp-level
/// iterators used to load tiles from them.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy0_,
/// B1-specific version of the policy (concept: MmaPolicy)
typename Policy1_,
    /// Number of stages
int Stages,
/// Used for partial specialization
typename Enable = bool>
class DualMmaBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Policy describing tuning details
using Policy0 = Policy0_;
using Policy1 = Policy1_;
//
// Dependent types
//
/// Warp-level Mma
using Operator0 = typename Policy0::Operator;
using Operator1 = typename Policy1::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy0::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = GemmShape<Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
  /// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator0::Policy::MmaShape::kK);
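  // Worked example (hypothetical shapes, added for clarity): with
  // Shape = GemmShape<128, 128, 32> and WarpGemm = GemmShape<64, 64, 32>,
  // WarpCount is GemmShape<2, 2, 1> (four warps per threadblock); if the
  // warp-level instruction shape has kK == 8, kWarpGemmIterations is 32 / 8 = 4.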
/// Number of stages
static int const kStages = Stages;
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator0::ElementA, typename Operator0::LayoutA>;
/// Tensor reference to the B operand
using TensorRefB0 = TensorRef<typename Operator0::ElementB, typename Operator0::LayoutB>;
using TensorRefB1 = TensorRef<typename Operator1::ElementB, typename Operator1::LayoutB>;
static_assert(kWarpGemmIterations > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
static_assert((kWarpGemmIterations % 2) == 0,
"Inner loop iteration must be an even number.");
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
class SharedStorage {
public:
//
// Type definitions
//
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<Shape::kM + Policy0::SmemPaddingA::kRow,
Shape::kK * kStages +
Policy0::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB0 =
MatrixShape<Shape::kK * kStages + Policy0::SmemPaddingB::kRow,
Shape::kN + Policy0::SmemPaddingB::kColumn>;
using ShapeB1 =
MatrixShape<Shape::kK * kStages + Policy1::SmemPaddingB::kRow,
Shape::kN + Policy1::SmemPaddingB::kColumn>;
public:
//
// Data members
//
/// Buffer for A operand
AlignedBuffer<typename Operator0::ElementA, ShapeA::kCount> operand_A;
/// Buffer for B operand
AlignedBuffer<typename Operator0::ElementB, ShapeB0::kCount> operand_B0;
AlignedBuffer<typename Operator1::ElementB, ShapeB1::kCount> operand_B1;
public:
//
// Methods
//
/// Returns a layout object for the A matrix
CUTLASS_DEVICE
static typename Operator0::LayoutA LayoutA() {
return Operator0::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator0::LayoutB LayoutB0() {
return Operator0::LayoutB::packed({ShapeB0::kRow, ShapeB0::kColumn});
}
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator1::LayoutB LayoutB1() {
return Operator1::LayoutB::packed({ShapeB1::kRow, ShapeB1::kColumn});
}
/// Returns a TensorRef to the A operand
CUTLASS_HOST_DEVICE
TensorRefA operand_A_ref() {
return TensorRefA{operand_A.data(), LayoutA()};
}
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB0 operand_B0_ref() {
return TensorRefB0{operand_B0.data(), LayoutB0()};
}
CUTLASS_HOST_DEVICE
TensorRefB1 operand_B1_ref() {
return TensorRefB1{operand_B1.data(), LayoutB1()};
}
};
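  // Sizing note (hypothetical shapes, added for clarity): ignoring padding,
  // with Shape = GemmShape<128, 128, 32> and kStages = 2, ShapeA is 128 x 64
  // and each of ShapeB0 / ShapeB1 is 64 x 128, so the A, B0, and B1 buffers
  // each hold 8192 elements of their respective operand types.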
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator0::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator0::IteratorB warp_tile_iterator_B0_;
typename Operator1::IteratorB warp_tile_iterator_B1_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
DualMmaBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx),
warp_tile_iterator_B0_(shared_storage.operand_B0_ref(), lane_idx),
warp_tile_iterator_B1_(shared_storage.operand_B1_ref(), lane_idx) {
}
};
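// Usage sketch (illustrative, not part of the original header): a derived
// threadblock-scoped dual MMA typically places SharedStorage in shared memory
// and constructs this base with its thread, warp, and lane indices, e.g.
//
//   __shared__ typename DualMma::SharedStorage shared_storage;
//   DualMma mma(shared_storage, thread_idx, warp_idx, lane_idx);
//
// where DualMma derives from DualMmaBase and implements the mainloop that
// consumes warp_tile_iterator_A_, warp_tile_iterator_B0_, and warp_tile_iterator_B1_.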
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/45_dual_gemm/threadblock/dual_mma_base.h/0 | {
"file_path": "examples/45_dual_gemm/threadblock/dual_mma_base.h",
"repo_id": "examples",
"token_count": 2711
} | 8 |