/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#define CUTLASS_DEBUG_TRACE_LEVEL 1
#include "cutlass_unit_test.h"
#include <cutlass/trace.h>
#include <iostream>
#include <cute/tensor.hpp>
using namespace cute;
template <class LayoutA, class LayoutB>
void
test_distribute(LayoutA const& layoutA,
LayoutB const& layoutB)
{
auto layoutR = domain_distribute(shape(layoutA), shape(layoutB));
CUTLASS_TRACE_HOST("test_distribute()");
CUTLASS_TRACE_HOST(layoutA << " <-> " << layoutB);
CUTLASS_TRACE_HOST(" => ");
CUTLASS_TRACE_HOST(layoutR);
// Test that layout B is softly compatible with layout R
EXPECT_TRUE(softly_compatible(layoutB, layoutR));
// Post-condition on the codomain of the distributed layout
for (int i = 0; i < size(layoutR); ++i) {
for (int j = i+1; j < size(layoutR); ++j) {
EXPECT_TRUE(layoutR(i) < layoutR(j)); // Surjective and Ordered
}
}
}
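// Hedged usage sketch (not part of the original test): the helper above works on plain
// static shapes, exactly as the TEST below calls it; a hypothetical extra case would be
//
//   test_distribute(Shape<_32, _4>{}, _16{});
//
// The two EXPECTs define what "correct" means here: the result of domain_distribute is
// softly compatible with the second shape and has a strictly increasing codomain.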
TEST(CuTe_core, Distribute)
{
CUTLASS_TRACE_HOST("-------------------------------");
CUTLASS_TRACE_HOST("DOMAIN DISTRIBUTE" );
CUTLASS_TRACE_HOST("-------------------------------");
{
auto shape_a = Shape<Shape<_64,_3>,Shape<_8,_8>>{};
auto shape_b = _128{};
test_distribute(shape_a, shape_b);
}
{
auto shape_a = Shape<Int<192>,Shape<_8,_8>>{};
auto shape_b = _128{};
test_distribute(shape_a, shape_b);
}
{
auto shape_a = Shape<Shape<_64,_3>,Shape<_8,_8>>{};
auto shape_b = _128{} * _8{};
test_distribute(shape_a, shape_b);
}
{
auto shape_a = Shape<Int<192>,Shape<_8,_8>>{};
auto shape_b = _128{} * _8{};
test_distribute(shape_a, shape_b);
}
{
auto shape_a = Shape<Shape<_64,_3>>{};
auto shape_b = _128{};
test_distribute(shape_a, shape_b);
}
}
[File: test/unit/cute/core/domain_distribute.cpp]
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level epilogue activation operators
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/layout/layout.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/util/host_tensor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
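// Each thread handles one Array<T, N> vector: the raw pointers are reinterpreted as arrays
// of N-element vectors so that the Array specialization of the activation functor (rather
// than its scalar form) is what gets exercised.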
template <typename T, int N, typename Func>
__global__ void test_Epilogue_thread_activation(T *out, T *in) {
cutlass::Array<T, N> *vec_out = reinterpret_cast<cutlass::Array<T, N> *>(out);
cutlass::Array<T, N> *vec_in = reinterpret_cast<cutlass::Array<T, N> *>(in);
Func func;
vec_out[threadIdx.x] = func(vec_in[threadIdx.x]);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Reference
//
static double GELU_golden_input[] = {
1.587425827980, 1.157652974129, 0.750432848930, -0.965980410576,
-0.388184845448, 0.014422321692, 0.353164494038, 1.354383468628,
0.167588576674, 0.272798538208, -0.377032428980, 1.923444747925,
0.308164477348, -0.341318070889, 0.278338819742, -0.292668998241,
-1.051743745804, -0.814175724983, 0.112737402320, 1.262938618660,
-1.582363605499, 0.722016870975, 1.053453564644, -0.659764587879,
0.734917521477, 0.091274201870, 0.604461073875, -0.219043627381,
-0.136795744300, 0.960650205612, -1.805408835411, 0.091029644012,
-1.023343324661, 0.147713735700, -0.499895423651, 1.351878166199,
-1.631091356277, -0.336171895266, -1.612408638000, 0.090832948685,
-0.658132910728, -0.326727777719, -1.986387014389, 0.787685871124,
-1.015677452087, -0.225094825029, 0.876752018929, 0.744826257229,
0.870290279388, -0.757595360279, 1.510331749916, 0.750012576580,
0.906444966793, -0.915759027004, 1.260277032852, -0.158465340734,
-0.109191477299, -0.817102134228, 0.391305118799, -0.524910449982,
0.351349592209, 0.801979541779, 0.446691334248, -0.741077482700,
1.205966711044, -0.910210072994, 0.945986449718, 0.784096539021,
1.670521497726, 0.344931513071, -0.301411420107, 0.309870749712,
-0.879704594612, -1.951189517975, -0.805817663670, -0.661812782288,
-0.505914270878, -1.836273789406, -0.381845980883, -0.554707705975,
-0.375447630882, -0.516645610332, 0.509586095810, 1.087131023407,
2.664817094803, -1.558295488358, -0.076461032033, -0.504621028900,
1.327111959457, -1.819981694221, 1.350415468216, -2.074112653732,
1.501431345940, -1.339013576508, 0.162817999721, -1.473457217216,
0.357770472765, 0.188413277268, 1.601302266121, -0.653882205486,
0.856162548065, 0.763102591038, -0.526283502579, 0.581961452961,
0.089969776571, 1.968745589256, 0.545802056789, -1.168786048889,
1.206663012505, -0.109096683562, -1.223938226700, 0.744599223137,
-1.779406785965, 0.766436159611, -0.579044401646, -1.002057313919,
-0.715845823288, -0.562508940697, 0.886768460274, 2.327786445618,
-0.148763969541, -0.918884515762, -0.367678701878, -1.105021238327,
-0.461237311363, 0.158228352666, -0.254040330648, 1.427477598190,
0.277530491352, 0.046293262392, -0.535557329655, -1.486695051193,
-0.953706681728, -1.040495038033, -0.314667612314, 0.348172843456,
0.522773325443, 0.025960063562, -0.482472360134, 1.993084549904,
-0.253064930439, -0.012146313675, -2.166327714920, 0.398040622473,
-0.022238900885, -0.443580865860, -0.898376941681, -0.571689844131,
1.666979670525, -0.831176340580, -0.671057403088, 0.481970995665,
-1.096243023872, -1.493894338608, 0.596651911736, -0.229505166411,
1.165976166725, 0.905094027519, 0.049716457725, -1.362933635712,
-0.366948783398, 1.461613893509, -0.718411505222, 0.895385026932,
-0.763122260571, 1.329716682434, 1.366570711136, -0.086544901133,
0.059739742428, 0.940766513348, -0.272854357958, -1.738811373711,
-0.361239165068, 0.696977972984, 1.288442254066, 1.264815807343,
-0.573566436768, -1.141678214073, 0.081865988672, -0.886228799820,
-0.236933603883, 1.050115466118, -0.538952171803, 0.651773929596,
-0.220034509897, -1.198960781097, 1.247478365898, -0.053529661149,
0.639809548855, 1.672434806824, 0.511088073254, -1.179364681244,
-0.730427742004, 0.157630980015, 0.389369845390, -0.925578773022,
-0.093250080943, -0.391062080860, 0.852983593941, 1.868778109550,
-1.198786258698, 0.604997038841, -1.482687234879, -2.469333171844,
0.718807697296, -0.559609353542, 2.187228441238, -2.927527904510,
0.148535788059, -0.097280368209, 0.674131810665, -1.137645959854,
0.792729616165, -1.166317462921, -0.498791724443, 1.675866723061,
-0.137909621000, -0.653263568878, -2.281216144562, 0.296096831560,
2.002410173416, 1.083609819412, 0.933580815792, -1.504760265350,
2.185185909271, 0.286121010780, -1.035485863686, -0.216372340918,
-0.274334043264, -0.849510788918, -1.397169828415, -0.407644748688,
0.159476816654, -0.170650705695, 0.335193097591, -0.156852483749,
0.036168430001, 0.858105242252, -1.086121797562, 0.404813349247,
-0.481496721506, -0.389882832766, 0.020690204576, -0.772020936012,
-0.758921504021, 0.323482036591, 0.115715265274, -0.811228036880,
-0.882436633110, 0.176811277866, 1.678015947342, 0.379081040621,
-0.842976212502, 0.346952259541, -0.545828759670, 1.632800459862
};
static double GELU_golden_output[] = {
1.498199582100, 1.014679551125, 0.580462038517, -0.161344811320,
-0.135453075171, 0.007294139825, 0.225325092673, 1.235459089279,
0.094946734607, 0.165724009275, -0.133120641112, 1.871103763580,
0.191376730800, -0.125069886446, 0.169681981206, -0.112644664943,
-0.154036879539, -0.169163048267, 0.061428427696, 1.132469892502,
-0.089851818979, 0.552240371704, 0.899579226971, -0.168043658137,
0.565008401871, 0.048956073821, 0.439583092928, -0.090532489121,
-0.060955654830, 0.798911273479, -0.064101703465, 0.048816055059,
-0.156645998359, 0.082529976964, -0.154254898429, 1.232632875443,
-0.083896033466, -0.123835846782, -0.086161509156, 0.048703473061,
-0.167972877622, -0.121522113681, -0.046670529991, 0.617986679077,
-0.157319813967, -0.092503339052, 0.709896743298, 0.574865520000,
0.703132867813, -0.169963955879, 1.411436080933, 0.580042064190,
0.741154611111, -0.164741978049, 1.129479527473, -0.069256491959,
-0.049848672003, -0.169087052345, 0.255214750767, -0.157380074263,
0.223928079009, 0.632535398006, 0.300378054380, -0.169946283102,
1.068588852882, -0.165071934462, 0.783203184605, 0.614346146584,
1.591325283051, 0.219006344676, -0.115003645420, 0.192637458444,
-0.166712537408, -0.049788996577, -0.169361919165, -0.168130636215,
-0.155041679740, -0.060888241976, -0.134137839079, -0.160614117980,
-0.132782235742, -0.156389534473, 0.354075312614, 0.936574816704,
2.654553413391, -0.092845752835, -0.035900454968, -0.154874503613,
1.204704761505, -0.062572605908, 1.230982899666, -0.039479542524,
1.401402950287, -0.120890334249, 0.091938301921, -0.103604510427,
0.228880971670, 0.108285568655, 1.513783097267, -0.167782157660,
0.688394129276, 0.593158841133, -0.157540664077, 0.418839782476,
0.048209801316, 1.920528769493, 0.386099845171, -0.141709372401,
1.069367766380, -0.049809500575, -0.135230198503, 0.574639260769,
-0.066881760955, 0.596510827541, -0.162873372436, -0.158483341336,
-0.169686436653, -0.161375194788, 0.720409095287, 2.304597616196,
-0.065585561097, -0.164551988244, -0.131098195910, -0.148708447814,
-0.148663327098, 0.089060656726, -0.101548098028, 1.317959904671,
0.169103100896, 0.024001283571, -0.158595800400, -0.101909510791,
-0.162240833044, -0.155090972781, -0.118474565446, 0.221488356590,
0.365645468235, 0.013248858973, -0.151851043105, 1.946992278099,
-0.101253561676, -0.006014300976, -0.032804865390, 0.260597169399,
-0.010922161862, -0.145792976022, -0.165743649006, -0.162226170301,
1.587365984917, -0.168676435947, -0.168497130275, 0.330191940069,
-0.149622067809, -0.100989677012, 0.432351946831, -0.093922272325,
1.023946166039, 0.739726305008, 0.025843897834, -0.117827951908,
-0.130937814713, 1.356489539146, -0.169726014137, 0.729478538036,
-0.169943705201, 1.207641005516, 1.249209761620, -0.040288090706,
0.031292784959, 0.777626037598, -0.107090584934, -0.071350336075,
-0.129670530558, 0.527676224709, 1.161149263382, 1.134579420090,
-0.162394225597, -0.144757837057, 0.043603736907, -0.166386902332,
-0.096278958023, 0.895924389362, -0.158969298005, 0.484089732170,
-0.090857118368, -0.138206124306, 1.115107178688, -0.025622237474,
0.472724437714, 1.593463659286, 0.355387806892, -0.140493586659,
-0.169871479273, 0.088687323034, 0.253673940897, -0.164135158062,
-0.043161027133, -0.136040985584, 0.685087263584, 1.811169505119,
-0.138226687908, 0.440080583096, -0.102422207594, -0.016713079065,
0.549075841904, -0.161096408963, 2.155813455582, -0.005001218989,
0.083037458360, -0.044870752841, 0.505522191525, -0.145202502608,
0.623111069202, -0.141991063952, -0.154108211398, 1.597298502922,
-0.061391282827, -0.167753636837, -0.025704355910, 0.182520583272,
1.957115054131, 0.932696640491, 0.769961357117, -0.099604383111,
2.153636932373, 0.175279796124, -0.155551761389, -0.089653611183,
-0.107515335083, -0.168032020330, -0.113423995674, -0.139319628477,
0.089841812849, -0.073763631284, 0.211594089866, -0.068651281297,
0.018605981022, 0.690416753292, -0.150658726692, 0.266040354967,
-0.151710823178, -0.135800719261, 0.010515870526, -0.169883996248,
-0.169960290194, 0.202769815922, 0.063187584281, -0.169236257672,
-0.166577890515, 0.100812792778, 1.599699616432, 0.245525524020,
-0.168275654316, 0.220552831888, -0.159705042839, 1.549110531807
};
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_gelu_taylor, device_f32) {
int const kN = 256;
int const kV = 4;
using Element = float;
using Func = cutlass::epilogue::thread::GELU_taylor<cutlass::Array<Element, kV>>;
double tolerance = 0.005;
//
// Construct workspace
//
cutlass::HostTensor<Element, cutlass::layout::RowMajor> tensor_Destination({1, kN});
cutlass::HostTensor<Element, cutlass::layout::RowMajor> tensor_Source({1, kN});
for (int i = 0; i < kN; ++i) {
tensor_Source.host_data(i) = Element(GELU_golden_input[i]);
}
tensor_Destination.sync_device();
tensor_Source.sync_device();
//
// Launch the kernel
//
dim3 grid(1,1,1);
dim3 block(kN / kV, 1, 1);
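// One thread per vector: kN / kV = 256 / 4 = 64 threads, each applying the GELU functor to a
// 4-wide Array.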
test_Epilogue_thread_activation<Element, kV, Func><<< grid, block >>>(
tensor_Destination.device_data(),
tensor_Source.device_data());
tensor_Destination.sync_host();
//
// Verify
//
for (int i = 0; i < kN; ++i) {
Element input = Element(GELU_golden_input[i]);
Element got = tensor_Destination.host_data(i);
Element expected = Element(GELU_golden_output[i]);
double rel_error = (double(got) - double(expected)) / double(expected);
double tolerance_override = tolerance;
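// The relaxed per-index tolerances below appear to correspond to golden outputs of very small
// magnitude (entries 203 and 207 are about -0.0167 and -0.0050), where the device-side
// approximation error becomes large in relative terms; the same two indices recur in the
// f16 test further down.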
switch (i) {
case 142: tolerance_override = 0.008; break;
case 203: tolerance_override = 0.03; break;
case 207: tolerance_override = 0.09; break;
case 218: tolerance_override = 0.013; break;
}
EXPECT_LT(std::abs(rel_error), tolerance_override)
<< "Input[" << i << "]: " << input << ", Got: " << got << ", expected: " << expected;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_gelu_taylor, device_f16) {
int const kN = 256;
int const kV = 8;
using Element = cutlass::half_t;
using Func = cutlass::epilogue::thread::GELU_taylor<cutlass::Array<Element, kV>>;
double tolerance = 0.005;
//
// Construct workspace
//
cutlass::HostTensor<Element, cutlass::layout::RowMajor> tensor_Destination({1, kN});
cutlass::HostTensor<Element, cutlass::layout::RowMajor> tensor_Source({1, kN});
for (int i = 0; i < kN; ++i) {
tensor_Source.host_data(i) = Element(GELU_golden_input[i]);
}
tensor_Destination.sync_device();
tensor_Source.sync_device();
//
// Launch the kernel
//
dim3 grid(1,1,1);
dim3 block(kN / kV, 1, 1);
test_Epilogue_thread_activation<Element, kV, Func><<< grid, block >>>(
tensor_Destination.device_data(),
tensor_Source.device_data());
tensor_Destination.sync_host();
//
// Verify
//
for (int i = 0; i < kN; ++i) {
Element input = Element(GELU_golden_input[i]);
Element got = tensor_Destination.host_data(i);
Element expected = Element(GELU_golden_output[i]);
double rel_error = (double(got) - double(expected)) / double(expected);
double tolerance_override = tolerance;
switch (i) {
case 36: tolerance_override = 0.006; break;
case 77: tolerance_override = 0.009; break;
case 95: tolerance_override = 0.008; break;
case 112: tolerance_override = 0.007; break;
case 171: tolerance_override = 0.006; break;
case 203: tolerance_override = 0.03; break;
case 207: tolerance_override = 0.15; break;
}
EXPECT_LT(std::abs(rel_error), tolerance_override)
<< "Input[" << i << "]: " << input << ", Got: " << got << ", expected: " << expected;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
[File: test/unit/epilogue/thread/activation.cu]
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Testbed for threadblock-scoped planar complex epilogues
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/half.h"
#include "cutlass/complex.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/util/host_tensor_planar_complex.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace kernel {
template <typename Epilogue>
__global__ void epilogue_planar_complex_threadblock(
typename Epilogue::OutputTileIterator::Params params_D,
typename Epilogue::OutputTileIterator::Element *ptr_D,
int64_t imaginary_stride_D,
typename Epilogue::OutputTileIterator::Params params_C,
typename Epilogue::OutputTileIterator::Element *ptr_C,
int64_t imaginary_stride_C,
typename Epilogue::OutputOp::Params params_output_op,
cutlass::MatrixCoord problem_size,
cutlass::TensorRef<
typename Epilogue::WarpMmaOperator::ElementC,
typename Epilogue::WarpMmaOperator::LayoutC> accumulator_ref,
int64_t imaginary_stride_accum,
int epilogue_count = 1) {
__shared__ typename Epilogue::SharedStorage shared_storage;
int thread_idx = threadIdx.x;
int warp_idx = threadIdx.x / 32;
int lane_idx = threadIdx.x % 32;
//
// Construct the epilogue
//
// Tile iterator writing to output tile
typename Epilogue::OutputTileIterator iterator_D_real(
params_D,
ptr_D,
problem_size,
thread_idx
);
typename Epilogue::OutputTileIterator iterator_D_imag(
params_D,
ptr_D + imaginary_stride_D,
problem_size,
thread_idx
);
// Tile iterator reading the source (C) tile
typename Epilogue::OutputTileIterator iterator_C_real(
params_C,
ptr_C,
problem_size,
thread_idx
);
typename Epilogue::OutputTileIterator iterator_C_imag(
params_C,
ptr_C + imaginary_stride_C,
problem_size,
thread_idx
);
// Epilogue operator
Epilogue epilogue(
shared_storage,
thread_idx,
warp_idx,
lane_idx);
//
// Initialize the accumulators
//
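// Map the linear warp index to (warp_m, warp_n) within the threadblock tile so that each warp
// offsets the accumulator view to the sub-tile it owns.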
int warp_mn = warp_idx % (Epilogue::WarpCount::kM * Epilogue::WarpCount::kN);
int warp_m = warp_mn % Epilogue::WarpCount::kM;
int warp_n = warp_mn / Epilogue::WarpCount::kM;
accumulator_ref.add_coord_offset({
warp_m * Epilogue::WarpMmaOperator::Shape::kM,
warp_n * Epilogue::WarpMmaOperator::Shape::kN});
//
// Load accumulators
//
typename Epilogue::WarpMmaOperator::IteratorC accumulator_iterator(accumulator_ref, lane_idx);
typename Epilogue::AccumulatorTile accumulators;
accumulators.clear();
accumulator_iterator.load(accumulators.real);
accumulator_iterator.load_with_pointer_offset(accumulators.imag, imaginary_stride_accum);
//
// Perform the epilogue operation
//
typename Epilogue::OutputOp output_op(params_output_op);
// Place the epilogue in a loop so assembly is clearly visible
for (int iter = 0; iter < epilogue_count; ++iter) {
epilogue(
output_op,
iterator_D_real,
iterator_D_imag,
accumulators,
iterator_C_real,
iterator_C_imag);
}
}
} // namespace kernel
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Epilogue_
>
class EpiloguePlanarComplexTestbed {
public:
using Epilogue = Epilogue_;
using ElementAccumulator = typename Epilogue::ElementAccumulator;
using ElementCompute = typename Epilogue::OutputOp::ElementCompute;
using ElementOutput = typename Epilogue::ElementOutput;
using OutputOpParams = typename Epilogue::OutputOp::Params;
using ComplexElementOutput = cutlass::complex<ElementOutput>;
using ComplexElementAccumulator = cutlass::complex<ElementAccumulator>;
using ComplexElementCompute = cutlass::complex<ElementCompute>;
public:
//
// Data members
//
cutlass::MatrixCoord quantized_size;
cutlass::HostTensorPlanarComplex<ElementAccumulator, cutlass::layout::RowMajor> accumulator_tensor;
cutlass::HostTensorPlanarComplex<ElementOutput, cutlass::layout::RowMajor> source_tensor;
cutlass::HostTensorPlanarComplex<ElementOutput, cutlass::layout::RowMajor> output_tensor;
public:
//
// Methods
//
EpiloguePlanarComplexTestbed():
quantized_size(Epilogue::Shape::kM, Epilogue::Shape::kN),
accumulator_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}),
source_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}),
output_tensor({Epilogue::Shape::kM, Epilogue::Shape::kN}) {
//
// Initialize problem space
//
#if 1
uint64_t seed = 2019;
cutlass::reference::host::TensorFillRandomUniform(
accumulator_tensor.host_view(),
seed,
20,
-20,
0);
cutlass::reference::host::TensorFillRandomUniform(
source_tensor.host_view(),
seed + 2018,
20,
-20,
0);
#else
cutlass::reference::host::BlockFillSequential(accumulator_tensor.host_data(), accumulator_tensor.capacity());
#endif
}
bool run_all() {
cutlass::complex<float> alpha_values[3];
alpha_values[0] = cutlass::complex<float>(1, 0);
alpha_values[1] = cutlass::complex<float>(0, 0);
alpha_values[2] = cutlass::complex<float>(2.25f, -0.5f);
cutlass::complex<float> beta_values[3];
beta_values[0] = cutlass::complex<float>(0, 0);
beta_values[1] = cutlass::complex<float>(1, 0);
beta_values[2] = cutlass::complex<float>(0.5f, -2.25f);
// Testing every case exhaustively would explode the test runtime, so this exercises the
// full output tile plus several smaller sizes to stress predication.
for (int m_idx = 0; m_idx < 3; ++m_idx) {
for (int n_idx = 0; n_idx < 3; ++n_idx) {
cutlass::MatrixCoord problem_size(
quantized_size.row() - m_idx * 3,
quantized_size.column() - n_idx * Epilogue::kElementsPerAccess
);
for (auto const &alpha : alpha_values) {
for (auto const &beta : beta_values) {
bool passed = run(problem_size, {alpha, beta});
if (!passed) {
return false;
}
}
}
}
}
return true;
}
/// Runs the test
bool run(
cutlass::MatrixCoord problem_size,
OutputOpParams output_params) {
//
// Initialize problem space
//
ComplexElementOutput default_output = ComplexElementOutput(ElementOutput(-127), ElementOutput(-101));
cutlass::reference::host::TensorFill(output_tensor.host_view(), default_output);
accumulator_tensor.sync_device();
output_tensor.sync_device();
source_tensor.sync_device();
//
// Initialize epilogue parameters
//
typename Epilogue::OutputTileIterator::Params params_D(output_tensor.layout());
typename Epilogue::OutputTileIterator::Params params_C(source_tensor.layout());
//
// Launch kernel
//
dim3 grid(1, 1);
dim3 block(Epilogue::WarpCount::kCount * 32, 1);
test::kernel::epilogue_planar_complex_threadblock<Epilogue><<< grid, block >>>(
params_D,
output_tensor.device_data(),
output_tensor.imaginary_stride(),
params_C,
source_tensor.device_data(),
source_tensor.imaginary_stride(),
output_params,
problem_size,
accumulator_tensor.device_view_real(),
accumulator_tensor.imaginary_stride()
);
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Kernel error: " << cudaGetErrorString(result) << std::endl;
return false;
}
//
// Verify results
//
output_tensor.sync_host();
int errors = 0;
int const kMaxErrors = 5;
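// The host reference is computed elementwise: inside the problem extent the expected value is
// alpha * accumulator + beta * source (in planar complex arithmetic); outside it the output
// must still hold default_output, which checks the iterators' predication.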
for (int r = 0; errors < kMaxErrors && r < quantized_size.row(); ++r) {
for (int c = 0; errors < kMaxErrors && c < quantized_size.column(); ++c) {
cutlass::MatrixCoord coord{r, c};
ComplexElementOutput got = output_tensor.at(coord);
ComplexElementOutput expected = default_output;
if (coord.row() < problem_size.row() && coord.column() < problem_size.column()) {
ComplexElementOutput src = source_tensor.at(coord);
ComplexElementCompute tmp =
output_params.alpha * ComplexElementCompute(accumulator_tensor.at(coord)) +
output_params.beta * ComplexElementCompute(src.real(), src.imag());
expected = ComplexElementOutput(ElementOutput(tmp.real()), ElementOutput(tmp.imag()));
}
if (expected != got) {
using OutputIO = cutlass::ScalarIO<ComplexElementOutput>;
EXPECT_TRUE(false)
<< "-------\n"
<< "Error - output element (" << coord << ") - expected: "
<< OutputIO(expected)
<< ", got: " << OutputIO(got) << std::endl;
++errors;
}
}
}
//
// Report results on error
//
if (errors) {
std::cout << "Incorrect result for problem("
<< problem_size.row() << ", "
<< problem_size.column() << ") for alpha: " << output_params.alpha << ", beta: " << output_params.beta << std::endl;
std::stringstream ss;
ss
<< "output_tensor_op_" << Epilogue::Shape::kM << "x" << Epilogue::Shape::kN << "_"
<< Epilogue::WarpTileIterator::WarpShape::kM << "x"
<< Epilogue::WarpTileIterator::WarpShape::kN
<< "_slice_" << Epilogue::WarpCount::kK << ".csv";
std::ofstream output_file(ss.str());
output_file << output_tensor.host_view();
std::cout << "Wrote workspace to '" << ss.str() << "'" << std::endl;
}
return !errors;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
[File: test/unit/epilogue/threadblock/testbed_planar_complex.h]
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for GEMM + broadcast interface
*/
#include <fstream>
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/kernel/default_gemm_with_broadcast.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/gemm/device/gemm_universal_with_broadcast.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/linear_combination_bias_relu.h"
#include "cutlass/epilogue/thread/linear_combination_residual_block.h"
#include "../../common/cutlass_unit_test.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_elementwise.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/gemm.h"
template<typename GemmElement, typename LayoutA, typename LayoutB, typename LayoutC>
struct TestbedUtils {
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<GemmElement, LayoutA> tensor_A; // Input A
cutlass::HostTensor<GemmElement, LayoutB> tensor_B; // Input B
cutlass::HostTensor<GemmElement, LayoutC> tensor_C; // Input C
cutlass::HostTensor<GemmElement, LayoutC> tensor_D1; // Input D
cutlass::HostTensor<GemmElement, LayoutC> tensor_D2; // Input D
cutlass::HostTensor<GemmElement, LayoutC> tensor_Y1; // Input Y
cutlass::HostTensor<GemmElement, LayoutC> tensor_Y2; // Input Y
cutlass::HostTensor<GemmElement, LayoutC> tensor_Y_ref;
//
// Methods
//
TestbedUtils(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<Element>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::AllZeros) {
cutlass::reference::host::TensorFill(view);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the GEMM workspace
//
tensor_A.resize(problem_size.mk());
tensor_B.resize(problem_size.kn());
tensor_C.resize({1, problem_size.n()});
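// tensor_C holds a single row (1 x N): it is the per-column broadcast vector. The plain
// GemmUniversal reference below consumes it with a leading dimension of 0 (ldv), so the same
// row is added to every row of the output.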
tensor_D1.resize(problem_size.mn());
tensor_D2.resize(problem_size.mn());
tensor_Y1.resize(problem_size.mn());
tensor_Y2.resize(problem_size.mn());
tensor_Y_ref.resize(problem_size.mn());
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
// Initialize D data to a smaller range to help avoid large roundoff errors.
int d_scope_min = -2;
int d_scope_max = 2;
cutlass::reference::host::TensorFillRandomUniform(tensor_D1.host_view(), seed + 2016, d_scope_max, d_scope_min, 0);
cutlass::reference::host::TensorFillRandomUniform(tensor_D2.host_view(), seed + 2015, d_scope_max, d_scope_min, 0);
EXPECT_TRUE(initialize_tensor(tensor_Y1.host_view(), cutlass::Distribution::AllZeros, 0));
EXPECT_TRUE(initialize_tensor(tensor_Y2.host_view(), cutlass::Distribution::AllZeros, 0));
EXPECT_TRUE(initialize_tensor(tensor_Y_ref.host_view(), cutlass::Distribution::AllZeros, 0));
// The random initialization above could legitimately produce all zeros, so force a non-zero
// value in the upper-left corner of each operand.
tensor_A.host_view().at({0, 0}) = GemmElement(1);
tensor_B.host_view().at({0, 0}) = GemmElement(1);
tensor_C.host_view().at({0, 0}) = GemmElement(1);
tensor_D1.host_view().at({0, 0}) = GemmElement(1);
tensor_D2.host_view().at({0, 0}) = GemmElement(1);
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D1.sync_device();
tensor_D2.sync_device();
}
/// Compares computed reference with device reference and outputs to a file if incorrect
bool compare_reference(
cutlass::gemm::GemmCoord problem_size, cutlass::HostTensor<GemmElement, LayoutC>& tensor_Y_ref, cutlass::HostTensor<GemmElement, LayoutC>& tensor_Y) {
tensor_Y_ref.sync_host();
tensor_Y.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D1.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D2.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Y_ref.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Y.host_view()), 0);
bool passed = true;
float norm_diff = 0;
norm_diff = cutlass::reference::host::TensorNormDiff(tensor_Y_ref.host_view(), tensor_Y.host_view(), float());
passed = (norm_diff <= 0.1f);
EXPECT_LT(norm_diff, 0.1f) << " tensor_Y is incorrect";
if (!passed) {
std::ofstream file("errors_testbed_gemm_broadcast_new.txt");
file
<< "problem: " << problem_size << "\n\n";
file
<< "capacity: \n"
<< "A: " << tensor_A.capacity()
<< "\nB: " << tensor_B.capacity()
<< "\nC: " << tensor_C.capacity()
<< "\nD1: " << tensor_D1.capacity()
<< "\nD2: " << tensor_D2.capacity()
<< "\nY: " << tensor_Y.capacity()
<< "\n\n"
<< "\nY_ref: " << tensor_Y_ref.capacity()
<< "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\n\nB =\n" << tensor_B.host_view()
<< "\n\nC =\n" << tensor_C.host_view()
<< "\n\nD1 =\n" << tensor_D1.host_view()
<< "\n\nD2 =\n" << tensor_D2.host_view()
<< "\n\nY =\n" << tensor_Y.host_view()
<< "\n\nY_ref =\n" << tensor_Y_ref.host_view();
}
return passed;
}
};
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
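// Test flow: (1) run a plain GemmUniversal as the reference for alpha * A * B + beta * C_vec;
// (2) run GemmUniversalWithBroadcast with a multiplies residual block and compare against the
// reference scaled elementwise by D1; (3) run the two-source variant and compare against
// (reference * D1) + D2, computed elementwise on the host.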
TEST(SM80_Device_GemmWithBroadcast_f16t_f16n_f16t_tensor_op_f16, 128x128_32x3_64x64x32_16x8x16) {
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using OpClass = cutlass::arch::OpClassTensorOp;
using ArchTag = cutlass::arch::Sm80;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle;
const int kStages = 3;
const int batch_count = 1;
const cutlass::half_t alpha(1);
const cutlass::half_t beta(1);
const int M = 1024;
const int K = 10240;
const int N = 512;
cutlass::gemm::GemmCoord problem{M, N, K};
const int batch_stride_A = 0;
const int batch_stride_B = 0;
const int batch_stride_C1 = 0;
const int batch_stride_C2 = 0;
const int batch_stride_D = 0;
const int batch_stride_Vector = 0;
const int batch_stride_Tensor = 0;
const int64_t lda = LayoutA::packed({problem.m(), problem.k()}).stride(0);
const int64_t ldb = LayoutB::packed({problem.k(), problem.n()}).stride(0);
const int64_t ldc1 = LayoutC::packed({problem.m(), problem.n()}).stride(0);
const int64_t ldc2 = LayoutC::packed({problem.m(), problem.n()}).stride(0);
const int64_t ldd = LayoutC::packed({problem.m(), problem.n()}).stride(0);
const int64_t ldv = 0;
const int64_t ldt = 0;
TestbedUtils<ElementA, LayoutA, LayoutB, LayoutC> utils;
utils.initialize(problem);
//
// Create reference Gemm
//
using GemmRef = cutlass::gemm::device::GemmUniversal<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator,
OpClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator, ElementAccumulator>,
ThreadblockSwizzle, kStages>;
typename GemmRef::Arguments args_ref{
cutlass::gemm::GemmUniversalMode::kGemm,
problem,
batch_count,
{alpha, beta},
utils.tensor_A.device_data(),
utils.tensor_B.device_data(),
utils.tensor_C.device_data(),
utils.tensor_Y_ref.device_data(),
batch_stride_A,
batch_stride_B,
batch_stride_C1,
batch_stride_D,
lda,
ldb,
ldv,
ldd,
};
GemmRef gemm_op_ref;
size_t workspace_size_ref = GemmRef::get_workspace_size(args_ref);
cutlass::device_memory::allocation<uint8_t> workspace_ref(workspace_size_ref);
cutlass::Status status = gemm_op_ref.initialize(args_ref, workspace_ref.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status);
status = gemm_op_ref();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status);
//
// Create GemmWithBroadcast from single source
//
using GemmSingle = cutlass::gemm::device::GemmUniversalWithBroadcast<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator,
OpClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape,
cutlass::epilogue::thread::LinearCombinationResidualBlock<
ElementOutput, ElementAccumulator, ElementAccumulator,
ElementAccumulator, 128 / cutlass::sizeof_bits<ElementOutput>::value,
cutlass::epilogue::thread::Identity, cutlass::multiplies, cutlass::epilogue::thread::Identity>,
ThreadblockSwizzle, kStages>;
typename GemmSingle::Arguments args_single{
cutlass::gemm::GemmUniversalMode::kGemm,
problem,
batch_count,
{alpha, beta},
utils.tensor_A.device_data(),
utils.tensor_B.device_data(),
utils.tensor_D1.device_data(),
utils.tensor_Y1.device_data(),
utils.tensor_C.device_data(),
/* ptr_Tensor = */ nullptr,
batch_stride_A,
batch_stride_B,
batch_stride_C1,
batch_stride_D,
batch_stride_Vector,
batch_stride_Tensor,
lda,
ldb,
ldc1,
ldd,
ldv,
ldt
};
GemmSingle gemm_op_single;
size_t workspace_size_single = GemmSingle::get_workspace_size(args_single);
cutlass::device_memory::allocation<uint8_t> workspace_single(workspace_size_single);
status = gemm_op_single.initialize(args_single, workspace_single.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status);
status = gemm_op_single();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status);
// Compute the broadcast on the reference previously computed and compare results
utils.tensor_Y_ref.sync_host();
cutlass::reference::host::TensorMul(utils.tensor_Y_ref.host_view(), utils.tensor_D1.host_view());
utils.tensor_Y_ref.sync_device();
utils.compare_reference(problem, utils.tensor_Y_ref, utils.tensor_Y1);
//
// Create GemmWithBroadcast from two sources
//
using GemmDouble = cutlass::gemm::device::GemmUniversalWithBroadcast<
ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator,
OpClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape,
cutlass::epilogue::thread::LinearCombinationResidualBlock<
ElementOutput, ElementAccumulator, ElementAccumulator,
ElementAccumulator, 128 / cutlass::sizeof_bits<ElementOutput>::value,
cutlass::epilogue::thread::Identity, cutlass::multiplies, cutlass::epilogue::thread::Identity, cutlass::plus>,
ThreadblockSwizzle, kStages>;
typename GemmDouble::Arguments args_double{
cutlass::gemm::GemmUniversalMode::kGemm,
problem,
batch_count,
{alpha, beta},
utils.tensor_A.device_data(),
utils.tensor_B.device_data(),
utils.tensor_D1.device_data(),
utils.tensor_D2.device_data(),
utils.tensor_Y2.device_data(),
utils.tensor_C.device_data(),
/* ptr_Tensor = */ nullptr,
batch_stride_A,
batch_stride_B,
batch_stride_C1,
batch_stride_C2,
batch_stride_D,
batch_stride_Vector,
batch_stride_Tensor,
lda,
ldb,
ldc1,
ldc2,
ldd,
ldv,
ldt
};
GemmDouble gemm_op_double;
size_t workspace_size_double = GemmDouble::get_workspace_size(args_double);
cutlass::device_memory::allocation<uint8_t> workspace_double(workspace_size_double);
status = gemm_op_double.initialize(args_double, workspace_double.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status);
status = gemm_op_double();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << cutlassGetStatusString(status);
// Compute the broadcast on the reference previously computed and compare results
utils.tensor_Y_ref.sync_host();
cutlass::reference::host::TensorAdd(utils.tensor_Y_ref.host_view(), utils.tensor_D2.host_view());
utils.tensor_Y_ref.sync_device();
utils.compare_reference(problem, utils.tensor_Y_ref, utils.tensor_Y2);
}
#endif
[File: test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_broadcast_sm80.cu]
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file generates the SIMT GEMM device tests under test/unit/gemm/device.
outputDir = ""
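# Usage sketch (hedged assumption about the workflow): run this script from
# test/unit/gemm/device, e.g. `python simt_sm50.py`; it writes one
# simt_<precision>gemm_<transA><transB>_sm50.cu file per precision/transpose combination
# into outputDir (the current directory by default), each containing CUTLASS_TEST_L0/L1/L2
# cases for the tile shapes selected below.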
################################################################################
# parameters
# Edge - for tiles, the edges represent the length of one side
# Ratio - the maximum ratio between 2 edges; limits the skinniness of tiles
# MaxEdge - maximum length of each edge
# Min/Max - minimum/maximum of the product of edge lengths
################################################################################
warpsPerThreadblockEdge = [1, 2, 4, 8, 16]
warpsPerThreadblockRatio = 2
warpsPerThreadblockMax = 16
# NOTE 1x32 and 2x16 warp tile shapes fail validation for ~10% of cases
warpShapeEdges = [8, 16, 32, 64, 128, 256]
warpShapeRatio = 4
warpShapeMax = 64*64
warpShapeMin = 8*8
threadblockEdgeMax = 256
# fields: char, type, bits/elem, max threadblock tile elements, L0 threadblock tiles
precisions = [
["c", "cutlass::complex<float>", 64, 64*128, [ [ 64, 128], [ 64, 32] ] ],
["q", "cutlass::Quaternion<float>", 64, 64*128, [ [ 64, 128], [ 64, 32] ] ],
["d", "double", 64, 64*64, [ [ 64, 64], [ 32, 32] ] ],
["h", "cutlass::half_t", 16, 128*256, [ [256, 128], [ 64, 128], [ 64, 32] ] ],
["i", "int", 32, 128*128, [ [128, 64], [ 16, 32] ] ],
["s", "float", 32, 128*128, [ [128, 256], [128, 128], [ 64, 64] ] ],
["z", "cutlass::complex<double>", 128, 64*64, [ [ 32, 64], [ 16, 32] ] ],
]
# L1 will have a single kernel for every unique shape
# L2 will have everything else
transposes = [
[False, False],
[False, True],
[True, False],
[True, True]
]
################################################################################
# warps per threadblock
################################################################################
warpsPerThreadblocks = []
for warpsPerThreadblock0 in warpsPerThreadblockEdge:
for warpsPerThreadblock1 in warpsPerThreadblockEdge:
if warpsPerThreadblock0 / warpsPerThreadblock1 <= warpsPerThreadblockRatio and warpsPerThreadblock1 / warpsPerThreadblock0 <= warpsPerThreadblockRatio and warpsPerThreadblock0 * warpsPerThreadblock1 <= warpsPerThreadblockMax:
warpsPerThreadblocks.append([warpsPerThreadblock0,
warpsPerThreadblock1])
print("WarpsPerThreadblocks",warpsPerThreadblocks)
################################################################################
# warp shapes
################################################################################
warpNumThreads = 32
warpShapes = []
for warp0 in warpShapeEdges:
for warp1 in warpShapeEdges:
if warp0 / warp1 <= warpShapeRatio and warp1 / warp0 <= warpShapeRatio and warp0*warp1 <= warpShapeMax and warp0*warp1 > warpShapeMin:
warpShapes.append([warp0, warp1])
print("WarpShapes", warpShapes)
numL0 = 0
numL1 = 0
numL2 = 0
################################################################################
# create kernels
# create a file for each precision/transpose
# each file contains many tile sizes
################################################################################
# precisions
for precision in precisions:
# get precision char
precisionChar = precision[0]
precisionType = precision[1]
precisionBits = precision[2]
threadblockMaxElements = precision[3]
threadblockTilesL0 = precision[4]
# transposes
for transpose in transposes:
# get transpose char
columnMajorA = transpose[0]
columnMajorB = transpose[1]
transCharA = "n" if columnMajorA else "t"
transCharB = "n" if columnMajorB else "t"
# open file
fileName="simt_%sgemm_%s%s_sm50.cu" % (precisionChar, transCharA, transCharB)
print("\n", fileName)
filePath = "%s%s" % (outputDir, fileName)
out = open(filePath, "w+")
# write file header
out.write("/***************************************************************************************************\n"
" * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. \n"
" * SPDX-License-Identifier: BSD-3-Clause \n"
" * \n"
" * Redistribution and use in source and binary forms, with or without \n"
" * modification, are permitted provided that the following conditions are met: \n"
" * \n"
" * 1. Redistributions of source code must retain the above copyright notice, this \n"
" * list of conditions and the following disclaimer. \n"
" * \n"
" * 2. Redistributions in binary form must reproduce the above copyright notice, \n"
" * this list of conditions and the following disclaimer in the documentation \n"
" * and/or other materials provided with the distribution. \n"
" * \n"
" * 3. Neither the name of the copyright holder nor the names of its \n"
" * contributors may be used to endorse or promote products derived from \n"
" * this software without specific prior written permission. \n"
" * \n"
" * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \n"
" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \n"
" * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \n"
" * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \n"
" * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \n"
" * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \n"
" * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \n"
" * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \n"
" * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \n"
" * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \n"
" *\n"
" **************************************************************************************************/\n"
"/*! \\file\n"
" \\brief Tests for device-wide GEMM interface\n"
"*/\n"
"\n"
"#include <iostream>\n"
"\n"
"#include \"cutlass/cutlass.h\"\n"
"#include \"cutlass/gemm/device/gemm.h\"\n"
"#include \"cutlass/numeric_types.h\"\n"
"\n"
"#include \"../../common/cutlass_unit_test.h\"\n"
"\n"
"#include \"cutlass/util/host_tensor.h\"\n"
"#include \"cutlass/util/tensor_view_io.h\"\n"
"#include \"cutlass/util/reference/host/tensor_fill.h\"\n"
"#include \"cutlass/util/reference/host/tensor_copy.h\"\n"
"#include \"cutlass/util/reference/host/tensor_compare.h\"\n"
"#include \"cutlass/util/reference/host/gemm.h\"\n"
"\n"
"#include \"testbed.h\"\n"
"\n")
foundThreadblockTilesL0 = {}
foundThreadblockTilesL1 = {}
########################################################################
# for each combination of tile sizes
########################################################################
for warpsPerThreadblock in warpsPerThreadblocks:
for warpShape in warpShapes:
warpThreadsM = 0
if warpShape[0] > warpShape[1]:
warpThreadsM = 8
else:
warpThreadsM = 4
warpThreadsN = warpNumThreads // warpThreadsM
# skip shapes with conflicting rectangularity
# they are unlikely to be fastest
blockG = warpsPerThreadblock[0] > warpsPerThreadblock[1]
blockL = warpsPerThreadblock[0] < warpsPerThreadblock[1]
warpG = warpShape[0] > warpShape[1]
warpL = warpShape[0] < warpShape[1]
blockG2 = warpsPerThreadblock[0] > warpsPerThreadblock[1]*2
blockL2 = warpsPerThreadblock[0]*2 < warpsPerThreadblock[1]
warpG2 = warpShape[0] > warpShape[1]*2
warpL2 = warpShape[0]*2 < warpShape[1]
if blockG2 and warpL: continue
if blockL2 and warpG: continue
if warpG2 and blockL: continue
if warpL2 and blockG: continue
# check threadblock ratios and max
threadblockTile = [warpShape[0]*warpsPerThreadblock[0],
warpShape[1]*warpsPerThreadblock[1]]
if threadblockTile[0] * threadblockTile[1] > threadblockMaxElements: continue
if threadblockTile[0] > threadblockEdgeMax: continue
if threadblockTile[1] > threadblockEdgeMax: continue
totalThreads = warpNumThreads*warpsPerThreadblock[0]*warpsPerThreadblock[1]
# calculate unroll
# ensure that each iteration performs at least one full load of A and B
unrollMin = 8
unrollMin0 = totalThreads // threadblockTile[0]
unrollMin1 = totalThreads // threadblockTile[1]
unroll = max(unrollMin, unrollMin0, unrollMin1)
threadTileM = warpShape[0] // warpThreadsM
threadTileN = warpShape[1] // warpThreadsN
if threadTileM < 2 or threadTileN < 2: continue
if threadTileM*threadTileN*precisionBits > 8*8*32: continue
# the epilogue currently requires the threadblock tile's N dimension to be at least warpNumThreads
if threadblockTile[1] < warpNumThreads: continue
# limit smem
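# The factor of 2 in the shared-memory estimate appears to account for the two pipeline stages
# configured in the generated kernels ("2 // Stages" below); 48 KB is the static shared-memory
# budget assumed per threadblock.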
smemBitsA = threadblockTile[0]*unroll*2*precisionBits
smemBitsB = threadblockTile[1]*unroll*2*precisionBits
smemKBytes = (smemBitsA + smemBitsB) // 8 // 1024
if (smemKBytes > 48): continue
# test level 0
testLevel = -1
for tileId in range(0, len(threadblockTilesL0)):
tbTile = threadblockTilesL0[tileId]
if tbTile[0] == threadblockTile[0] and tbTile[1] == threadblockTile[1]:
if tuple(tbTile) not in foundThreadblockTilesL0:
testLevel = 0
numL0 += 1
foundThreadblockTilesL0[tuple(tbTile)] = True
# test level 1
if testLevel < 0:
threadblockTileAlreadyUsed = False
if tuple(threadblockTile) not in foundThreadblockTilesL1:
testLevel = 1
numL1 += 1
foundThreadblockTilesL1[tuple(threadblockTile)] = True
# test level 2
if testLevel < 0:
testLevel = 2
numL2 += 1
################################################################
# write this tile to file
################################################################
print("%ix%ix%i__%ix%i_%ix%i_%ix%i L%i" % (
threadblockTile[0], threadblockTile[1], unroll,
threadTileM, threadTileN,
warpThreadsM, warpThreadsN,
warpsPerThreadblock[0], warpsPerThreadblock[1], testLevel))
out.write("////////////////////////////////////////////////////////////////////////////////\n"
"// Elements / Thread: %3i x %3i\n"
"// Threads / Warp: %3i x %3i\n"
"// Warps / Block: %3i x %3i\n"
"// Threadblock: %3i x %3i x %2i\n"
% ( threadTileM, threadTileN,
warpThreadsM, warpThreadsN,
warpsPerThreadblock[0], warpsPerThreadblock[1],
threadblockTile[0], threadblockTile[1], unroll
)
)
out.write("CUTLASS_TEST_L%i(SM50_device_%sgemm_%s%s, %ix%ix%i_%ix%ix1_%ix%i_%ix%i_%ix%i, {\n" % (
testLevel,
precisionChar,
transCharA,
transCharB,
threadblockTile[0],
threadblockTile[1],
unroll,
warpShape[0],
warpShape[1],
threadTileM,
threadTileN,
warpThreadsM,
warpThreadsN,
warpsPerThreadblock[0],
warpsPerThreadblock[1]
))
out.write(" using precision = %s;\n" % precisionType)
out.write(" using ThreadblockShape = cutlass::gemm::GemmShape<%i, %i, %i>;\n" % (
threadblockTile[0],
threadblockTile[1],
unroll))
out.write(" using WarpShape = cutlass::gemm::GemmShape<%i, %i, %i>;\n\n" % (
warpShape[0],
warpShape[1],
unroll))
out.write(" static int const kEpilogueElementsPerAccess = 1;\n"
" using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;\n"
" using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<\n"
" precision, kEpilogueElementsPerAccess, precision, precision>;\n\n")
out.write(" using Gemm = cutlass::gemm::device::Gemm<\n"
" precision, cutlass::layout::%sMajor,\n"
" precision, cutlass::layout::%sMajor,\n"
" precision, cutlass::layout::RowMajor,\n"
" precision,\n"
" cutlass::arch::OpClassSimt,\n"
" cutlass::arch::Sm50,\n"
" ThreadblockShape, WarpShape, InstructionShape,\n"
" EpilogueOutputOp,\n"
" cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,\n"
" 2 // Stages\n"
" >;\n" % (
"Column" if columnMajorA else "Row",
"Column" if columnMajorB else "Row",
))
out.write(" EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());\n"
"} )\n\n")
out.close()
print("NumKernels:", numL0, numL1, numL2)
| test/unit/gemm/device/simt_sm50.py/0 | {
"file_path": "test/unit/gemm/device/simt_sm50.py",
"repo_id": "test",
"token_count": 8209
} | 50 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#pragma once
#include <iostream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "testbed.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
struct TestbedSplitK : public Testbed<Gemm> {
using Base = Testbed<Gemm>;
using ElementCompute = typename Base::ElementCompute;
//
// Methods
//
TestbedSplitK(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
Base(init_A_, init_B_, init_C_, seed_) { }
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmCoord problem_size,
int split_k_slices,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
typename Gemm::Arguments arguments{
problem_size,
this->tensor_A.device_ref(),
this->tensor_B.device_ref(),
this->tensor_C.device_ref(),
this->tensor_D.device_ref(),
{alpha, beta},
split_k_slices
};
Gemm gemm_op;
size_t workspace_size = Gemm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess);
//
// Run the GEMM
//
status = gemm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
//
// Verify
//
return this->verify(problem_size, alpha, beta);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
bool TestAllGemmSplitK() {
bool passed = true;
cutlass::gemm::GemmCoord problem_sizes[] = {
{8, 8, 2048},
{8, 8, 2056},
{264, 72, 520},
{264, 520, 120},
{264, 520, 264}
};
int split_k_slices[] = {
1, 2, 4, 5, 7
};
double problem_alpha[] = {
0.5
};
double problem_beta[] = {
2.0
};
using Testbed = TestbedSplitK<Gemm>;
using ElementCompute = typename Testbed::ElementCompute;
Testbed testbed;
for (auto problem_size : problem_sizes) {
for (int split_k_count : split_k_slices) {
for (double alpha : problem_alpha) {
for (double beta : problem_beta) {
passed = testbed.run(
problem_size,
split_k_count,
ElementCompute(alpha),
ElementCompute(beta)
);
if (!passed) {
std::cout << "Failed on size " << problem_size << " with split_k_count " << split_k_count << std::endl;
return false;
}
}
}
}
}
EXPECT_TRUE(passed);
return passed;
}
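// Minimal usage sketch (Gemm here is a placeholder for a fully-specified cutlass::gemm::device::Gemm type):
//
//   TEST(SM50_device_gemm_splitk, example) {
//     EXPECT_TRUE(test::gemm::device::TestAllGemmSplitK<Gemm>());
//   }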
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_splitk.h/0 | {
"file_path": "test/unit/gemm/device/testbed_splitk.h",
"repo_id": "test",
"token_count": 2088
} | 51 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cute/tensor.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
namespace nvrtc {
namespace thread {
template<
typename ElementA, typename ElementB, typename ElementC,
typename TileShape, typename ClusterShape,
bool kTransA, bool kTransB,
int RANK_M, int RANK_N, int RANK_K, int RANK_L
>
struct ContractionKernel {
using ElementScalar = float;
using ElementAccum = float;
using EpilogueThread = cutlass::epilogue::thread::LinearCombination<ElementC,
1,
ElementAccum,
ElementScalar>;
static constexpr cute::GMMA::Major majorA = ! kTransA ? cute::GMMA::Major::MN : cute::GMMA::Major::K;
static constexpr cute::GMMA::Major majorB = ! kTransB ? cute::GMMA::Major::K : cute::GMMA::Major::MN;
/// Kernel config
typedef int64_t stride_type;
typedef int32_t extent_type;
static constexpr const stride_type* stride_null = nullptr;
static constexpr const extent_type* extent_null = nullptr;
template <int Rank, bool IsMajor, class Indexable>
static constexpr
auto
make_stride_tuple(Indexable const& t, int n, int64_t init_default = 0) {
static_assert(Rank > 1);
if constexpr (IsMajor) {
return cute::transform(cute::make_seq<Rank>{}, [&](auto i) {
if constexpr (i == 0) {
return cute::Int<1>{};
}
else {
return i < n ? t[i] : init_default;
}
});
}
else {
return cute::make_int_tuple<Rank>(t, n, init_default);
}
}
using StrideA = decltype(cute::make_stride(
make_stride_tuple<RANK_M, majorA == cute::GMMA::Major::MN>(stride_null, 0, 0),
make_stride_tuple<RANK_K, majorA == cute::GMMA::Major::K>(stride_null, 0, 0),
cute::make_int_tuple<RANK_L>(stride_null, 0, 0)));
using StrideB = decltype(cute::make_stride(
make_stride_tuple<RANK_N, majorB == cute::GMMA::Major::MN>(stride_null, 0, 0),
make_stride_tuple<RANK_K, majorB == cute::GMMA::Major::K>(stride_null, 0, 0),
cute::make_int_tuple<RANK_L>(stride_null, 0, 0)));
using StrideC = decltype(cute::make_stride(
cute::make_int_tuple<RANK_M>(stride_null, 0, 0),
cute::make_int_tuple<RANK_N>(stride_null, 0, 0),
cute::make_int_tuple<RANK_L>(stride_null, 0, 0)));
using ProblemShape = decltype(cute::make_shape(
cute::make_int_tuple<RANK_M>(extent_null, 0, 0),
cute::make_int_tuple<RANK_N>(extent_null, 0, 0),
cute::make_int_tuple<RANK_K>(extent_null, 0, 0),
cute::make_int_tuple<RANK_L>(extent_null, 0, 0)));
using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, StrideA, 16 / sizeof(ElementA),
ElementB, StrideB, 16 / sizeof(ElementB),
ElementAccum,
TileShape, ClusterShape, cutlass::gemm::collective::StageCountAuto,
cutlass::gemm::KernelTmaWarpSpecialized
>::CollectiveOp;
using EpilogueOutputOp = cutlass::epilogue::collective::DefaultEpilogue<StrideC, StrideC, EpilogueThread, cutlass::gemm::EpilogueDefault>;
using CollectiveEpilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<EpilogueOutputOp>;
using Kernel = cutlass::gemm::kernel::GemmUniversal<
ProblemShape,
CollectiveOp,
CollectiveEpilogue>;
};
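// Example instantiation (a sketch; the template arguments below are hypothetical and must agree
// with the ranks and extents supplied at runtime through ProblemShape):
//
//   using Contraction = nvrtc::thread::ContractionKernel<
//       cutlass::half_t, cutlass::half_t, cutlass::half_t,   // ElementA, ElementB, ElementC
//       cute::Shape<cute::_128, cute::_128, cute::_64>,      // TileShape
//       cute::Shape<cute::_1, cute::_1, cute::_1>,           // ClusterShape
//       false, false,                                        // kTransA, kTransB
//       2, 2, 2, 1>;                                         // RANK_M, RANK_N, RANK_K, RANK_L
//   using Kernel = Contraction::Kernel;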
} // namespace thread
} // namespace nvrtc
| test/unit/nvrtc/kernel/thread/contraction.hpp/0 | {
"file_path": "test/unit/nvrtc/kernel/thread/contraction.hpp",
"repo_id": "test",
"token_count": 1956
} | 52 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Common Testbed file shared by Pipeline unit tests
*/
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <cutlass/gemm/gemm.h>
#include "cutlass/util/command_line.h"
#include "../common/cutlass_unit_test.h"
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
#define CUTLASS_UNIT_TEST_PIPELINE true
#else
#define CUTLASS_UNIT_TEST_PIPELINE false
#endif
// Command line test options
struct Options {
//
// Data Members
//
bool help;
bool verification_enabled;
int SM_count;
int clock_MHz;
//
// Methods
//
Options():
help(false),
verification_enabled(true),
SM_count(116),
clock_MHz(1477)
{ }
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("verification-enabled", verification_enabled, true);
cmd.get_cmd_line_argument("sm-count", SM_count, 116);
cmd.get_cmd_line_argument("clock", clock_MHz, 1477);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --verification-enabled=<bool> Enable/Disable verification\n"
<< " --sm-count=<int> Number of SMs on the chip\n"
<< " --clock=<int> Locked clock value in Mhz\n";
return out;
}
};
//
// Testbed
//
template<typename Pipeline>
struct Testbed {
private:
// Commandline options
Options options;
void run_test(uint32_t const kNumIters) {
// Run CuTe Gemm
Pipeline pipeline;
cudaError_t result = pipeline.run(kNumIters);
CUTE_CHECK_LAST();
}
public:
Testbed(Options const &options_) : options(options_) {
int device_id = 0;
cudaDeviceProp device_prop;
CUTE_CHECK_ERROR(cudaSetDevice(device_id));
CUTE_CHECK_ERROR(cudaGetDeviceProperties(&device_prop, device_id));
if (device_prop.major < 1) {
fprintf(stderr, "Device does not support CUDA.\n");
exit(1);
}
}
/// Run verification Gemm problem sizes
bool verification() {
std::array<uint32_t, 5> kNumIters;
for (size_t i = 0; i < kNumIters.size(); ++i) {
kNumIters[i] = static_cast<uint32_t>( (rand() % 1000) + 1 );
}
for (int n : kNumIters) {
std::cout << "Stages = " << Pipeline::Stages << " kNumIters = " << n << "\n";
run_test(n);
}
return true;
}
};
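// Typical usage from a pipeline unit test (sketch; PipelineTest stands in for a concrete test
// type exposing run(num_iterations) and a static Stages member, as assumed by run_test/verification):
//
//   Options options;
//   options.parse(argc, args);
//   Testbed<PipelineTest> testbed(options);
//   EXPECT_TRUE(testbed.verification());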
| test/unit/pipeline/testbed.h/0 | {
"file_path": "test/unit/pipeline/testbed.h",
"repo_id": "test",
"token_count": 1513
} | 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests cutlass::transform::kernel::ConvFilterFormatTransformer
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/kernel/filter_format_transformer.hpp"
#include "cutlass/transform/device/transform_universal_adapter.hpp"
#include "thrust/universal_vector.h"
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
template <class Element, class Shape_S>
auto verify_ckrs_to_crsk(thrust::host_vector<Element> const &S, thrust::host_vector<Element> const &D, Shape_S shape_s) {
using namespace cute;
int32_t errors = 0;
int32_t const kErrorLimit = 10;
if (S.size() != D.size()) {
return false;
}
auto shape_d = select<2, 0, 1, 3>(shape_s);
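// The destination permutes the (s, r, k, c) source modes into (k, s, r, c): e.g. for
// illustrative extents shape_s = (3, 3, 64, 64), the source element at (s, r, k, c) = (1, 2, 5, 7)
// maps to destination coordinate (5, 1, 2, 7) under shape_d = (64, 3, 3, 64).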
for (int i = 0; i < (int)S.size(); ++i) {
auto [s, r, k, c] = idx2crd(i, shape_s);
auto d_idx = crd2idx(make_coord(k, s, r, c), shape_d);
if (S[i] != D[d_idx]) {
std::cerr << "Error. S[" << i << "]: " << S[i] << ", D[" << d_idx << "]: " << D[d_idx] << std::endl;
if (++errors >= kErrorLimit) {
std::cerr << "Aborting on " << kErrorLimit << "nth error." << std::endl;
return false;
}
}
}
return errors == 0;
}
template <class Element, class Shape_S>
auto verify_ckrs_to_krsc(thrust::host_vector<Element> const &S, thrust::host_vector<Element> const &D, Shape_S shape_s) {
using namespace cute;
int32_t errors = 0;
int32_t const kErrorLimit = 10;
if (S.size() != D.size()) {
return false;
}
auto shape_d = select<3, 0, 1, 2>(shape_s);
for (int i = 0; i < (int)S.size(); ++i) {
auto [s, r, k, c] = idx2crd(i, shape_s);
auto d_idx = crd2idx(make_coord(c, s, r, k), shape_d);
if (S[i] != D[d_idx]) {
std::cerr << "Error. S[" << i << "]: " << S[i] << ", D[" << d_idx << "]: " << D[d_idx] << std::endl;
if (++errors >= kErrorLimit) {
std::cerr << "Aborting on " << kErrorLimit << "nth error." << std::endl;
return false;
}
}
}
return errors == 0;
}
template <class Element,
cutlass::transform::kernel::FilterFormat SrcFormat,
cutlass::transform::kernel::FilterFormat DstFormat,
int Alignment = 16>
bool transform_test() {
using namespace cute;
using TransformKernel = cutlass::transform::kernel::ConvFilterFormatTransformer<SrcFormat, DstFormat, 4, Element, Alignment>;
using Transform = cutlass::transform::device::TransformUniversalAdapter<TransformKernel>;
auto s = 3;
auto r = 3;
auto k = 64 + Alignment / (int)(sizeof(Element));
auto c = 64 + Alignment / (int)(sizeof(Element));
thrust::host_vector<Element> h_S(s * r * k * c);
thrust::host_vector<Element> h_D(s * r * k * c);
//
// Initialize
//
for (int i = 0; i < (int)h_S.size(); ++i) {
h_S[i] = static_cast<Element>(i);
h_D[i] = Element{};
}
thrust::device_vector<Element> d_S = h_S;
thrust::device_vector<Element> d_D = h_D;
Transform transform_op;
const void* src_ptr = static_cast<const void *>(d_S.data().get());
void* dst_ptr = static_cast<void *>(d_D.data().get());
typename TransformKernel::FilterExtent filter_extent;
filter_extent[0] = k;
filter_extent[1] = r;
filter_extent[2] = s;
filter_extent[3] = c;
auto args = typename Transform::Arguments {
src_ptr,
dst_ptr,
filter_extent
};
cutlass::Status status = cutlass::Status::kInvalid;
size_t workspace_size = Transform::get_workspace_size(args);
thrust::universal_vector<uint8_t> workspace(workspace_size);
status = transform_op.initialize(args, workspace.data().get());
if (status != cutlass::Status::kSuccess) {
cudaError_t error = cudaGetLastError();
std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n";
return false;
}
status = transform_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess);
if (status != cutlass::Status::kSuccess) {
return false;
}
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << " Kernel execution error: "
<< cudaGetErrorString(result);
// Verification
h_D = d_D;
auto tensor_shape_S = make_shape(s, r, k, c);
bool passed = false;
if constexpr(DstFormat == cutlass::transform::kernel::FilterFormat::KTRSC) {
// KTRSC
passed = verify_ckrs_to_krsc(h_S, h_D, tensor_shape_S);
}
else if constexpr(DstFormat == cutlass::transform::kernel::FilterFormat::CTRSK) {
// CTRSK
passed = verify_ckrs_to_crsk(h_S, h_D, tensor_shape_S);
}
return passed;
}
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900))
TEST(Transform_kernel_ConvFilterFormatTransformer, ckrs_to_crsk) {
bool passed = true;
// fp16 kernel with alignment bytes from 16 to 2.
passed &= transform_test<cutlass::half_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK>();
passed &= transform_test<cutlass::half_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK, 8>();
passed &= transform_test<cutlass::half_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK, 4>();
passed &= transform_test<cutlass::half_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK, 2>();
// fp8 kernel with alignment bytes from 16 to 1.
passed &= transform_test<cutlass::float_e4m3_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK>();
passed &= transform_test<cutlass::float_e4m3_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK, 8>();
passed &= transform_test<cutlass::float_e4m3_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK, 4>();
passed &= transform_test<cutlass::float_e4m3_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK, 2>();
passed &= transform_test<cutlass::float_e4m3_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK, 1>();
// int8 kernel with alignment bytes from 16 to 1.
passed &= transform_test<int8_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK>();
passed &= transform_test<int8_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK, 8>();
passed &= transform_test<int8_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK, 4>();
passed &= transform_test<int8_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK, 2>();
passed &= transform_test<int8_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK, 1>();
// fp32 kernel with alignment bytes from 16 to 4.
passed &= transform_test<float, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK>();
passed &= transform_test<float, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK, 8>();
passed &= transform_test<float, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::CTRSK, 4>();
EXPECT_TRUE(passed);
}
// CKRS -> KRSC
TEST(Transform_kernel_ConvFilterFormatTransformer, ckrs_to_krsc) {
bool passed = true;
// fp16 kernel with alignment bytes from 16 to 2.
passed &= transform_test<cutlass::half_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC>();
passed &= transform_test<cutlass::half_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC, 8>();
passed &= transform_test<cutlass::half_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC, 4>();
passed &= transform_test<cutlass::half_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC, 2>();
// fp8 kernel with alignment bytes from 16 to 1.
passed &= transform_test<cutlass::float_e4m3_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC>();
passed &= transform_test<cutlass::float_e4m3_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC, 8>();
passed &= transform_test<cutlass::float_e4m3_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC, 4>();
passed &= transform_test<cutlass::float_e4m3_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC, 2>();
passed &= transform_test<cutlass::float_e4m3_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC, 1>();
// int8 kernel with alignment bytes from 16 to 1.
passed &= transform_test<int8_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC>();
passed &= transform_test<int8_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC, 8>();
passed &= transform_test<int8_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC, 4>();
passed &= transform_test<int8_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC, 2>();
passed &= transform_test<int8_t, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC, 1>();
// fp32 kernel with alignment bytes from 16 to 4.
passed &= transform_test<float, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC>();
passed &= transform_test<float, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC, 8>();
passed &= transform_test<float, cutlass::transform::kernel::FilterFormat::CKTRS, cutlass::transform::kernel::FilterFormat::KTRSC, 4>();
EXPECT_TRUE(passed);
}
#endif
| test/unit/transform/kernel/filter_format_transformer.cu/0 | {
"file_path": "test/unit/transform/kernel/filter_format_transformer.cu",
"repo_id": "test",
"token_count": 4194
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines operations for reduction operation in CUTLASS Library.
*/
#pragma once
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/library/library.h"
#include "library_internal.h"
#include "cutlass/core_io.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Operator_>
class ReductionOperation : public Operation {
public:
using Operator = Operator_;
using ElementWorkspace = typename Operator::ElementWorkspace;
using ElementAccumulator = typename Operator::ElementAccumulator;
using ElementOutput = typename Operator::ElementOutput;
using ElementCompute = typename Operator::OutputOp::ElementCompute;
using OperatorArguments = typename Operator::Arguments;
protected:
///
ReductionDescription description_;
public:
/// Constructor
ReductionOperation(char const *name = "unknown_reduction") {
description_.name = name;
description_.provider = Provider::kCUTLASS;
description_.kind = OperationKind::kReduction;
description_.tile_description.threadblock_shape = make_Coord(Operator::Shape::kRow, Operator::Shape::kColumn, 1);
description_.tile_description.math_instruction.instruction_shape = make_Coord(1, 1, 1);
description_.tile_description.math_instruction.element_accumulator = NumericTypeMap<ElementAccumulator>::kId;
description_.tile_description.math_instruction.opcode_class = OpcodeClassID::kSimt;
description_.tile_description.math_instruction.math_operation = MathOperationID::kAdd;
description_.tile_description.minimum_compute_capability = 50;
description_.tile_description.maximum_compute_capability = 1024;
description_.element_workspace = NumericTypeMap<ElementWorkspace>::kId;
description_.element_output = NumericTypeMap<ElementOutput>::kId;
description_.element_epilogue = NumericTypeMap<ElementCompute>::kId;
}
/// Returns the description of the Reduction operation
virtual OperationDescription const & description() const {
return description_;
}
protected:
/// Constructs the arguments structure given the configuration and arguments
static Status construct_arguments_(
OperatorArguments &operator_args,
ReductionConfiguration const *configuration) {
operator_args.problem_size = configuration->problem_size;
operator_args.partitions = configuration->partitions;
operator_args.partition_stride = configuration->partition_stride;
operator_args.workspace = {nullptr, int(configuration->ldw)};
operator_args.source = {nullptr, int(configuration->lds)};
operator_args.destination = {nullptr, int(configuration->ldd)};
return Status::kSuccess;
}
/// Constructs the arguments structure given the configuration and arguments
static Status update_arguments_(
OperatorArguments &operator_args,
ReductionArguments const *arguments) {
if (arguments->pointer_mode == ScalarPointerMode::kHost) {
typename Operator::OutputOp::Params params(
*static_cast<ElementCompute const *>(arguments->alpha),
*static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.output = params;
}
else if (arguments->pointer_mode == ScalarPointerMode::kDevice){
typename Operator::OutputOp::Params params(
static_cast<ElementCompute const *>(arguments->alpha),
static_cast<ElementCompute const *>(arguments->beta)
);
operator_args.output = params;
}
else {
return Status::kErrorInvalidProblem;
}
operator_args.workspace.reset(static_cast<ElementWorkspace *>(const_cast<void *>(arguments->workspace)));
operator_args.source.reset(static_cast<ElementOutput *>(const_cast<void *>(arguments->source)));
operator_args.destination.reset(static_cast<ElementOutput *>(const_cast<void *>(arguments->destination)));
return Status::kSuccess;
}
public:
/// Returns success if the operation can proceed
virtual Status can_implement(
void const *configuration_ptr,
void const *arguments_ptr) const {
ReductionConfiguration const *configuration =
static_cast<ReductionConfiguration const *>(configuration_ptr);
ReductionArguments const *arguments =
static_cast<ReductionArguments const *>(arguments_ptr);
OperatorArguments args;
Status status = construct_arguments_(args, configuration);
if (status != Status::kSuccess) {
return status;
}
status = update_arguments_(args, arguments);
if (status != Status::kSuccess) {
return status;
}
return Operator::can_implement(args);
}
/// Gets the host-side workspace
virtual uint64_t get_host_workspace_size(
void const *configuration) const {
return sizeof(Operator);
}
/// Gets the device-side workspace
virtual uint64_t get_device_workspace_size(
void const *configuration_ptr,
void const *arguments_ptr = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<ReductionConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return 0;
}
return Operator::get_workspace_size(args);
}
/// Initializes the workspace
virtual Status initialize(
void const *configuration_ptr,
void *host_workspace,
void *device_workspace,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = construct_arguments_(
args,
static_cast<ReductionConfiguration const *>(configuration_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = new (host_workspace) Operator;
//std::cout << "initialize library::Reduction" << std::endl;
//print_operator_args(args);
return op->initialize(args, device_workspace, stream);
}
/// Runs the kernel
virtual Status run(
void const *arguments_ptr,
void *host_workspace,
void *device_workspace = nullptr,
cudaStream_t stream = nullptr) const {
OperatorArguments args;
Status status = update_arguments_(
args,
static_cast<ReductionArguments const *>(arguments_ptr));
if (status != Status::kSuccess) {
return status;
}
Operator *op = static_cast<Operator *>(host_workspace);
status = op->update(args, device_workspace);
if (status != Status::kSuccess) {
return status;
}
//std::cout << "run library::Reduction" << std::endl;
//print_operator_args(args);
return op->run(stream);
}
/// Call print_operator_args from the Reduction::initialize()
// to dump arguments passed on to cutlass operator for debugging
void print_operator_args(OperatorArguments &operator_args) const {
std::cout << "Reduction::OperatorArguments" << std::endl
<< " problem_size: "
<< operator_args.problem_size << std::endl
<< " partitions: "
<< operator_args.partitions << std::endl
<< " partition_stride: "
<< operator_args.partition_stride << std::endl
<< " epilogue (alpha, beta): "
<< operator_args.output.alpha << ", "
<< operator_args.output.beta << std::endl
<< " workspace (ptr, stride): "
<< operator_args.workspace.data() << ", "
<< operator_args.workspace.stride(0) << std::endl
<< " source (ptr, stride): "
<< operator_args.source.data() << ", "
<< operator_args.source.stride(0) << std::endl
<< " destination (ptr, stride): "
<< operator_args.destination.data() << ", "
<< operator_args.destination.stride(0) << std::endl;
}
};
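// Typical call sequence through the library::Operation interface defined above (sketch):
//   1. can_implement(&configuration, &arguments)
//   2. get_host_workspace_size(&configuration) and get_device_workspace_size(&configuration)
//   3. initialize(&configuration, host_workspace, device_workspace, stream)
//   4. run(&arguments, host_workspace, device_workspace, stream)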
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/reduction/reduction_operation.h/0 | {
"file_path": "tools/library/src/reduction/reduction_operation.h",
"repo_id": "tools",
"token_count": 3308
} | 55 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief
*/
#pragma once
#include <map>
#include <string>
#include "cutlass/library/library.h"
#include "cutlass/library/util.h"
#include "options.h"
#include "device_allocation.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Collection of allocations on the device
class DeviceContext {
public:
//
// Type definitions
//
using AllocationMap = std::map<std::string, DeviceAllocation *>;
private:
//
// Data members
//
/// Memory allocations that exist (owning)
DeviceAllocationList device_memory_;
/// Non-owning set of named allocations
AllocationMap allocations_;
public:
/// Allocates memory of a given type, capacity (elements), and name
DeviceAllocation *allocate_block(
std::string const &name,
library::NumericTypeID type,
size_t capacity);
/// Allocates memory of a given type, capacity (elements), and name
DeviceAllocation *allocate_tensor(
std::string const &name,
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride = std::vector<int64_t>(),
int batch_count = 1);
/// Allocates memory of a given type, capacity (elements), and name
DeviceAllocation *allocate_tensor(
Options const &options,
std::string const &name,
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count,
int seed_shift = 0);
/// Allocates memory for sparse meta data
DeviceAllocation *allocate_sparsemeta_tensor(
Options const &options,
std::string const &name,
library::NumericTypeID type,
library::LayoutTypeID layout_id,
library::NumericTypeID type_a,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count,
int seed_shift = 0);
/// Clears named allocations (but does not necessarily free memory)
void clear();
/// Frees all device memory allocations
void free();
/// Gets the allocation by name
DeviceAllocation &at(std::string const &name);
size_t size() const;
AllocationMap::iterator begin();
AllocationMap::iterator end();
};
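// Usage sketch (extents and element/layout choices below are illustrative):
//
//   DeviceContext ctx;
//   DeviceAllocation *A = ctx.allocate_tensor(
//     options, "A",
//     library::NumericTypeID::kF16, library::LayoutTypeID::kColumnMajor,
//     {int(m), int(k)}, {int64_t(lda)}, /*batch_count=*/1);
//   DeviceAllocation &same_block = ctx.at("A");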
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
| tools/profiler/include/cutlass/profiler/device_context.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/device_context.h",
"repo_id": "tools",
"token_count": 1232
} | 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cutlass/profiler/cublas_helpers.h"
#include "cutlass/profiler/rank_k_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
RankKOperationProfiler::RankKOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kRankK,
{
{ArgumentTypeID::kEnumerated, {"rank_k_kind"}, "Variant of RankK (universal)"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the RankK problem space"},
{ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the RankK problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
{ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for RankK kernel (lower or upper)"},
{ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for RankK kernel (symmetric or hermitian)"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of RankK computed in one batch"},
},
{ library::Provider::kCUBLAS}
) {
description_ = " Rank-k Update. D = alpha * A*A^T + beta * C (symmetric) or D = alpha * A*A^H + beta * C (hermitian)";
}
/// Destructor
RankKOperationProfiler::~RankKOperationProfiler() {
}
/// Prints usage statement for the math function
void RankKOperationProfiler::print_usage(std::ostream &out) const {
out << "RankK" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void RankKOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size Syrk kernel:\n"
<< " $ cutlass_profiler --operation=rank_k --blas_mode=symmetric --n=1024 --k=128\n\n"
<< "Profile a particular problem size Herk kernel:\n"
<< " $ cutlass_profiler --operation=rank_k --blas_mode=hermitian --n=1024 --k=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=rank_k --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=rank_k --accumulator-type=f16,f32\n\n"
<< "Schmoo over fill modees:\n"
<< " $ cutlass_profiler --operation=rank_k --fill_mode=lower/upper\n\n"
<< "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
<< " $ cutlass_profiler --operation=rank_k --A=f16:column or --A=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=rank_k --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=rank_k --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=rank_k --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=rank_k --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to rank_k kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=rank_k \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status RankKOperationProfiler::RankKProblem::parse(
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->k, "k", problem_space, problem)) {
// default value
this->k = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (this->split_k_slices > 1 && this->batch_count > 1) {
// At least one of these must be one
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
this->alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
this->beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->n), int(this->k)}).front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->n), int(this->n)}).front();
return Status::kSuccess;
}
/// Total number of bytes loaded
int64_t RankKOperationProfiler::RankKProblem::bytes(library::RankKDescription const &operation_desc) const {
// Input bytes read and Output bytes written for the gemm problem
int64_t bytes =
int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k +
int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k +
// Half matrix including the diagonal will have (N*(N+1))/2 elements
int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
}
bytes *= batch_count;
return bytes;
}
/// Total number of flops computed
int64_t RankKOperationProfiler::RankKProblem::flops(library::RankKDescription const &operation_desc) const {
// FLOPs = 2 * n(n+1)k/2 [mma] + 2 * n(n+1)/2 [epilogue]
// FLOPs = n(n+1)(k + 1)
int64_t flops_ = n * (n + 1) * (k + 1);
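// e.g. n = 1024, k = 128 (the sizes used in print_examples above) give
// 1024 * 1025 * 129 ~= 1.35e8 flops before the complex-valued scaling below.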
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddGaussianComplex:
flops_ *= 3;
break;
default: break;
}
return flops_;
}
/// Initializes a performance result
void RankKOperationProfiler::RankKProblem::initialize_result(
PerformanceResult &result,
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "rank_k_kind", problem_space, library::to_string(operation_desc.rank_k_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode));
set_argument(result, "n", problem_space, n);
set_argument(result, "k", problem_space, k);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status RankKOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::RankKDescription const &operation_desc =
static_cast<library::RankKDescription const &>(operation->description());
if (operation_desc.rank_k_kind != library::RankKKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
rank_k_workspace_.configuration.problem_size.m() = int(problem_.n);
rank_k_workspace_.configuration.problem_size.n() = int(problem_.n);
rank_k_workspace_.configuration.problem_size.k() = int(problem_.k);
rank_k_workspace_.configuration.lda = problem_.lda;
rank_k_workspace_.configuration.ldc = problem_.ldc;
rank_k_workspace_.configuration.ldd = problem_.ldc;
//rank_k_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
rank_k_workspace_.configuration.batch_count = int(problem_.split_k_slices);
rank_k_workspace_.arguments.A = nullptr;
rank_k_workspace_.arguments.C = nullptr;
rank_k_workspace_.arguments.D = nullptr;
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&rank_k_workspace_.configuration, &rank_k_workspace_.arguments);
}
/// Initializes the performance result
void RankKOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
result.bytes = problem_.bytes(operation_desc);
result.flops = problem_.flops(operation_desc);
result.runtime = 0;
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
result.flops *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
result.flops *= 4;
break;
default: break;
}
}
/// Initializes workspace
Status RankKOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::RankKDescription const &operation_desc =
static_cast<library::RankKDescription const &>(operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
int seed_shift = 0;
rank_k_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.n), int(problem_.k)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
rank_k_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)},
1, // batch_count
seed_shift++
);
rank_k_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)}
);
rank_k_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)}
);
rank_k_workspace_.Computed->copy_from_device(rank_k_workspace_.C->data());
rank_k_workspace_.Reference->copy_from_device(rank_k_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(&rank_k_workspace_.configuration);
rank_k_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(&rank_k_workspace_.configuration);
rank_k_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = operation->initialize(
&rank_k_workspace_.configuration,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kRankK;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool RankKOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&rank_k_workspace_.arguments,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
  // CUTLASS op ran but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const & rank_k_desc = static_cast<library::RankKDescription const &>(operation->description());
if (cublas_satisfies(rank_k_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
  // Returning true means profiling continues
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against the cuBLAS reference
bool RankKOperationProfiler::verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
#if CUTLASS_ENABLE_CUBLAS
library::RankKDescription const &rank_k_desc =
static_cast<library::RankKDescription const &>(operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
cublasStatus_t status = handle.get_cublas_create_status();
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublas<t>Syrk()
//
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.Reference->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Reference->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
detail::cublasRankKDispatcher rank_k_op(
rank_k_desc,
rank_k_workspace_.configuration,
rank_k_workspace_.arguments
);
if (rank_k_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = rank_k_op(handle);
// Handle errors
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
options,
*rank_k_workspace_.Computed,
*rank_k_workspace_.Reference
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
rank_k_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
}
#endif
  // Returning true means profiling continues
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool RankKOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&rank_k_workspace_.arguments,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/rank_k_operation_profiler.cu/0 | {
"file_path": "tools/profiler/src/rank_k_operation_profiler.cu",
"repo_id": "tools",
"token_count": 8486
} | 57 |
/******************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
 * \brief CUDA kernels to transform a device memory tensor from NHWC layout to NCHW layout.
*/
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
namespace cutlass {
/** \brief interface to transform a device memory tensor from NHWC layout to NCHW layout.
* \tparam T: data type
*/
template <typename T>
void nhwc_to_nchw(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNCHW> ref_output,
cudaStream_t stream);
template <typename T>
__global__ void nhwc_to_nchw_kernel(T *output,
const T *input,
const int n,
const int h,
const int w,
const int c) {
const int hw = h*w;
const int hwc = hw*c;
  // Tile is padded to 33 columns so the transposed reads below avoid shared memory bank conflicts
  __shared__ T shbuf[32 * (32 + 1)];
const int32_t tid = threadIdx.y*blockDim.x + threadIdx.x;
const int32_t wid = tid / 32;
const int32_t lid = tid % 32;
const int32_t ni = blockIdx.z;
const int32_t hwi0 = blockIdx.y * 32;
const int32_t ci0 = blockIdx.x * 32;
const size_t input_idx = ni * hwc + (hwi0 + wid) * c + ci0;
const T *A = input + input_idx;
if (ci0 + lid < c) {
const int lid_x_33 = lid * 33;
if ((hwi0 + 32) <= hw) {
int hwi = wid; // between 0 and 7
CUTLASS_PRAGMA_UNROLL
for (int cLoopIdx = 0; cLoopIdx < 4; cLoopIdx++) {
shbuf[lid_x_33 + hwi] = A[lid];
A = &A[8 * c];
hwi += 8;
}
} else {
for (int hwi = wid; hwi < 32; hwi += 8) {
if ((hwi + hwi0) < hw) {
shbuf[lid_x_33 + hwi] = A[lid];
}
A = &A[8 * c];
}
}
}
__syncthreads();
const int32_t hwiOut = hwi0 + lid;
output = &output[ni * hwc + hwiOut];
if (hwiOut < hw) {
if (ci0 + 32 < c) {
int cI = wid;
CUTLASS_PRAGMA_UNROLL
for (int hwLoopIdx = 0; hwLoopIdx < 4; ++hwLoopIdx) {
output[(ci0 + cI) * hw] = shbuf[(cI)*33 + lid];
cI += 8;
}
} else {
for (int cI = wid; cI < 32; cI += 8) {
if (ci0 + cI < c) {
output[(ci0 + cI) * hw] = shbuf[(cI)*33 + lid];
}
}
}
}
}
template <typename T>
void nhwc_to_nchw(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNCHW> ref_output,
cudaStream_t stream) {
  // Extents are given in NHWC order for the input and NCHW order for the output;
  // both must describe the same logical tensor.
  assert(
    input_tensor_size.n() == output_tensor_size.n() &&
    input_tensor_size.h() == output_tensor_size.w() &&
    input_tensor_size.w() == output_tensor_size.c() &&
    input_tensor_size.c() == output_tensor_size.h());
int n = input_tensor_size.n();
int h = input_tensor_size.h();
int w = input_tensor_size.w();
int c = input_tensor_size.c();
dim3 grid((c + 31)/32, (h*w + 31)/32, n);
dim3 block(32, 8);
nhwc_to_nchw_kernel<<<grid, block, 0, stream>>>(ref_output.data(), ref_input.data(),
n, h, w, c);
}
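/** \brief Usage sketch: a hypothetical helper (not part of the library interface) showing
 * the coordinate conventions the transform above expects — input extents in NHWC order,
 * output extents in NCHW order. The TensorRef arguments are assumed to already reference
 * device allocations holding n*h*w*c elements each.
 */
template <typename T>
void nhwc_to_nchw_example(TensorRef<T, layout::TensorNHWC> ref_input,
                          TensorRef<T, layout::TensorNCHW> ref_output,
                          int n, int h, int w, int c,
                          cudaStream_t stream = nullptr) {
  // Same logical tensor, described in each layout's natural dimension order
  cutlass::Tensor4DCoord input_size(n, h, w, c);   // N, H, W, C
  cutlass::Tensor4DCoord output_size(n, c, h, w);  // N, C, H, W

  nhwc_to_nchw(input_size, output_size, ref_input, ref_output, stream);
}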
} //namespace cutlass
| tools/util/include/cutlass/util/device_nhwc_to_nchw.h/0 | {
"file_path": "tools/util/include/cutlass/util/device_nhwc_to_nchw.h",
"repo_id": "tools",
"token_count": 2295
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for convolution in device-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
namespace cutlass {
namespace reference {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Conv2d device reference kernel
////////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2d Fprop kernel - y = fprop(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 16, // shape of a threadblock in units of threads
int kCtaShapeN = 8 // shape of a threadblock in units of threads
>
__global__ void Conv2dFprop(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int64_t npq_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int k_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_n[kThreadM];
int thread_p[kThreadM];
int thread_q[kThreadM];
// Compute N, P, Q coordinates for each row of a thread's tile
int64_t PQ = int64_t(problem_size.P) * problem_size.Q;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int64_t npq = npq_start + m;
thread_n[m] = int(npq / PQ);
int64_t residual = npq % PQ;
thread_p[m] = int(residual / problem_size.Q);
thread_q[m] = int(residual % problem_size.Q);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
int c_per_group = problem_size.C / problem_size.groups;
int k_per_group = problem_size.K / problem_size.groups;
// Compute convolution
for (int R = 0; R < problem_size.R; ++R) {
for (int S = 0; S < problem_size.S; ++S) {
for (int C = 0; C < problem_size.C; ++C) {
        // Get the group index of the current channel
int c_group_idx = C / c_per_group;
// Load from activations tensor
int filter_r = R;
int filter_s = S;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - R;
filter_s = problem_size.S - 1 - S;
}
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int h = thread_p[m] * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = thread_q[m] * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
if (thread_n[m] < problem_size.N && h >= 0 && h < problem_size.H && w >= 0 && w < problem_size.W) {
element_A[m] = ElementAccumulator(tensor_x.at({thread_n[m], h, w, C}));
}
else {
element_A[m] = ElementAccumulator();
}
}
// Load from filters tensor
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_k = k_start + n;
int k_group_idx = thread_k / k_per_group;
if (thread_k < problem_size.K && k_group_idx == c_group_idx) {
element_B[n] = ElementAccumulator(tensor_w.at({thread_k, R, S, C % c_per_group}));
}
else {
element_B[n] = ElementAccumulator();
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
}
}
}
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
if (thread_n[m] < problem_size.N && thread_p[m] < problem_size.P && thread_q[m] < problem_size.Q) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_k = k_start + n;
if (thread_k < problem_size.K) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_y_in.at({thread_n[m], thread_p[m], thread_q[m], thread_k}));
}
tensor_y_out.at({thread_n[m], thread_p[m], thread_q[m], thread_k}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
// Conv3d Fprop kernel - y = fprop(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 16, // shape of a threadblock in units of threads
int kCtaShapeN = 8 // shape of a threadblock in units of threads
>
__global__ void Conv3dFprop(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int64_t nzpq_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int k_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_n[kThreadM];
int thread_z[kThreadM];
int thread_p[kThreadM];
int thread_q[kThreadM];
// Compute N, Z, P, Q coordinates for each row of a thread's tile
int64_t PQ = int64_t(problem_size.P) * problem_size.Q;
int64_t ZPQ = PQ * problem_size.Z;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int64_t nzpq = nzpq_start + m;
thread_n[m] = int(nzpq / ZPQ);
int64_t residual = nzpq % ZPQ;
thread_z[m] = int(residual / PQ);
residual = residual % PQ;
thread_p[m] = int(residual / problem_size.Q);
thread_q[m] = int(residual % problem_size.Q);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int T = 0; T < problem_size.T; ++T) {
for (int R = 0; R < problem_size.R; ++R) {
for (int S = 0; S < problem_size.S; ++S) {
for (int C = 0; C < problem_size.C; ++C) {
// Load from activations tensor
int filter_t = T;
int filter_r = R;
int filter_s = S;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - T;
filter_r = problem_size.R - 1 - R;
filter_s = problem_size.S - 1 - S;
}
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int d = thread_z[m] * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d;
int h = thread_p[m] * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = thread_q[m] * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
if (thread_n[m] < problem_size.N &&
d >= 0 && d < problem_size.D &&
h >= 0 && h < problem_size.H &&
w >= 0 && w < problem_size.W) {
element_A[m] = ElementAccumulator(tensor_x.at({thread_n[m], d, h, w, C}));
}
else {
element_A[m] = ElementAccumulator();
}
}
// Load from filters tensor
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_k = k_start + n;
if (thread_k < problem_size.K) {
element_B[n] = ElementAccumulator(tensor_w.at({thread_k, T, R, S, C}));
}
else {
element_B[n] = ElementAccumulator();
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
} // for (C)
} // for (S)
} // for (R)
} // for (T)
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
if (thread_n[m] < problem_size.N &&
thread_z[m] < problem_size.Z &&
thread_p[m] < problem_size.P &&
thread_q[m] < problem_size.Q) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_k = k_start + n;
if (thread_k < problem_size.K) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_y_in.at({thread_n[m], thread_z[m], thread_p[m], thread_q[m], thread_k}));
}
tensor_y_out.at({thread_n[m], thread_z[m], thread_p[m], thread_q[m], thread_k}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
} // for (n)
}
} // for (m)
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2d dgrad kernel - dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 16, // shape of a threadblock in units of threads
int kCtaShapeN = 8 // shape of a threadblock in units of threads
>
__global__ void Conv2dDgrad(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int64_t nhw_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int c_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_n[kThreadM];
int thread_h[kThreadM];
int thread_w[kThreadM];
// Compute N, H, W coordinates for each row of a thread's tile
int64_t HW = int64_t(problem_size.H) * problem_size.W;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int64_t nhw = nhw_start + m;
thread_n[m] = int(nhw / HW);
int64_t residual = nhw % HW;
thread_h[m] = int(residual / problem_size.W);
thread_w[m] = int(residual % problem_size.W);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int R = 0; R < problem_size.R; ++R) {
for (int S = 0; S < problem_size.S; ++S) {
for (int K = 0; K < problem_size.K; ++K) {
        // Load from the output gradient tensor
int filter_r = R;
int filter_s = S;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - R;
filter_s = problem_size.S - 1 - S;
}
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int p = thread_h[m] + problem_size.pad_h - filter_r * problem_size.dilation_h;
int q = thread_w[m] + problem_size.pad_w - filter_s * problem_size.dilation_w;
element_A[m] = ElementAccumulator();
if (p >= 0 && !(p % problem_size.stride_h) && q >= 0 && !(q % problem_size.stride_w)) {
p = p / problem_size.stride_h;
q = q / problem_size.stride_w;
if (thread_n[m] < problem_size.N && p < problem_size.P && q < problem_size.Q) {
element_A[m] = ElementAccumulator(tensor_dy.at({thread_n[m], p, q, K}));
}
}
}
// Load from filters tensor
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_c = c_start + n;
if (thread_c < problem_size.C) {
element_B[n] = ElementAccumulator(tensor_w.at({K, R, S, thread_c}));
}
else {
element_B[n] = ElementAccumulator();
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
}
}
}
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
if (thread_n[m] < problem_size.N && thread_h[m] < problem_size.H && thread_w[m] < problem_size.W) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_c = c_start + n;
if (thread_c < problem_size.C) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_dx_in.at({thread_n[m], thread_h[m], thread_w[m], thread_c}));
}
tensor_dx_out.at({thread_n[m], thread_h[m], thread_w[m], thread_c}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
// Conv3d dgrad kernel - dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 16, // shape of a threadblock in units of threads
int kCtaShapeN = 8 // shape of a threadblock in units of threads
>
__global__ void Conv3dDgrad(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int64_t ndhw_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int c_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_n[kThreadM];
int thread_d[kThreadM];
int thread_h[kThreadM];
int thread_w[kThreadM];
  // Compute N, D, H, W coordinates for each row of a thread's tile
int64_t HW = int64_t(problem_size.H) * problem_size.W;
int64_t DHW = HW * problem_size.D;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int64_t ndhw = ndhw_start + m;
thread_n[m] = int(ndhw / DHW);
int64_t residual = ndhw % DHW;
thread_d[m] = int(residual / HW);
residual = residual % HW;
thread_h[m] = int(residual / problem_size.W);
thread_w[m] = int(residual % problem_size.W);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int T = 0; T < problem_size.T; ++T) {
for (int R = 0; R < problem_size.R; ++R) {
for (int S = 0; S < problem_size.S; ++S) {
for (int K = 0; K < problem_size.K; ++K) {
          // Load from the output gradient tensor
int filter_t = T;
int filter_r = R;
int filter_s = S;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - T;
filter_r = problem_size.R - 1 - R;
filter_s = problem_size.S - 1 - S;
}
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int z = thread_d[m] + problem_size.pad_d - filter_t * problem_size.dilation_d;
int p = thread_h[m] + problem_size.pad_h - filter_r * problem_size.dilation_h;
int q = thread_w[m] + problem_size.pad_w - filter_s * problem_size.dilation_w;
element_A[m] = ElementAccumulator();
if (z >= 0 && !(z % problem_size.stride_d) &&
p >= 0 && !(p % problem_size.stride_h) &&
q >= 0 && !(q % problem_size.stride_w)) {
z = z / problem_size.stride_d;
p = p / problem_size.stride_h;
q = q / problem_size.stride_w;
if (thread_n[m] < problem_size.N && z < problem_size.Z && p < problem_size.P && q < problem_size.Q) {
element_A[m] = ElementAccumulator(tensor_dy.at({thread_n[m], z, p, q, K}));
}
}
}
// Load from filters tensor
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_c = c_start + n;
if (thread_c < problem_size.C) {
element_B[n] = ElementAccumulator(tensor_w.at({K, T, R, S, thread_c}));
}
else {
element_B[n] = ElementAccumulator();
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
} // for (C)
} // for (S)
} // for (R)
} // for (T)
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
if (thread_n[m] < problem_size.N &&
thread_d[m] < problem_size.D &&
thread_h[m] < problem_size.H &&
thread_w[m] < problem_size.W) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_c = c_start + n;
if (thread_c < problem_size.C) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_dx_in.at({thread_n[m], thread_d[m], thread_h[m], thread_w[m], thread_c}));
}
tensor_dx_out.at({thread_n[m], thread_d[m], thread_h[m], thread_w[m], thread_c}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2d wgrad kernel - dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 8, // shape of a threadblock in units of threads
int kCtaShapeN = 16 // shape of a threadblock in units of threads
>
__global__ void Conv2dWgrad(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int k_start = blockIdx.x * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int64_t rsc_start = int64_t(blockIdx.y) * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_r[kThreadN];
int thread_s[kThreadN];
int thread_c[kThreadN];
// Compute R, S, C coordinates for each row of a thread's tile
int64_t SC = int64_t(problem_size.S) * problem_size.C;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int64_t rsc = rsc_start + n;
int64_t residual = rsc % SC;
thread_r[n] = int(rsc / SC);
thread_s[n] = int(residual / problem_size.C);
thread_c[n] = int(residual % problem_size.C);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int N = 0; N < problem_size.N; ++N) {
for (int P = 0; P < problem_size.P; ++P) {
for (int Q = 0; Q < problem_size.Q; ++Q) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int thread_k = k_start + m;
element_A[m] = ElementAccumulator();
if (thread_k < problem_size.K) {
element_A[m] = ElementAccumulator(tensor_dy.at({N, P, Q, thread_k}));
}
}
        // Load from activations tensor
        CUTLASS_PRAGMA_UNROLL
        for (int n = 0; n < kThreadN; ++n) {
          // Map filter coordinates, flipping them in convolution mode
int filter_r = thread_r[n];
int filter_s = thread_s[n];
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - filter_r;
filter_s = problem_size.S - 1 - filter_s;
}
int h = P * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = Q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
element_B[n] = ElementAccumulator();
if (h >= 0 && h < problem_size.H && w >= 0 && w < problem_size.W && thread_c[n] < problem_size.C) {
element_B[n] = ElementAccumulator(tensor_x.at({N, h, w, thread_c[n]}));
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
}
}
}
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int thread_k = k_start + m;
if (thread_k < problem_size.K) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
if (thread_r[n] < problem_size.R && thread_s[n] < problem_size.S && thread_c[n] < problem_size.C) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_dw_in.at({thread_k, thread_r[n], thread_s[n], thread_c[n]}));
}
tensor_dw_out.at({thread_k, thread_r[n], thread_s[n], thread_c[n]}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
// Conv3d wgrad kernel - dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 8, // shape of a threadblock in units of threads
int kCtaShapeN = 16 // shape of a threadblock in units of threads
>
__global__ void Conv3dWgrad(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int k_start = blockIdx.x * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int64_t trsc_start = int64_t(blockIdx.y) * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_t[kThreadN];
int thread_r[kThreadN];
int thread_s[kThreadN];
int thread_c[kThreadN];
  // Compute T, R, S, C coordinates for each row of a thread's tile
int64_t SC = int64_t(problem_size.S) * problem_size.C;
int64_t RSC = SC * problem_size.R;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int64_t trsc = trsc_start + n;
thread_t[n] = int(trsc / RSC);
int64_t residual = trsc % RSC;
thread_r[n] = int(residual / SC);
residual = residual % SC;
thread_s[n] = int(residual / problem_size.C);
thread_c[n] = int(residual % problem_size.C);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int N = 0; N < problem_size.N; ++N) {
for (int Z = 0; Z < problem_size.Z; ++Z) {
for (int P = 0; P < problem_size.P; ++P) {
for (int Q = 0; Q < problem_size.Q; ++Q) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int thread_k = k_start + m;
element_A[m] = ElementAccumulator();
if (thread_k < problem_size.K) {
element_A[m] = ElementAccumulator(tensor_dy.at({N, Z, P, Q, thread_k}));
}
}
          // Load from activations tensor
          CUTLASS_PRAGMA_UNROLL
          for (int n = 0; n < kThreadN; ++n) {
            // Map filter coordinates, flipping them in convolution mode
int filter_t = thread_t[n];
int filter_r = thread_r[n];
int filter_s = thread_s[n];
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - filter_t;
filter_r = problem_size.R - 1 - filter_r;
filter_s = problem_size.S - 1 - filter_s;
}
            int d = Z * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d;
int h = P * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = Q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
element_B[n] = ElementAccumulator();
if (d >= 0 && d < problem_size.D &&
h >= 0 && h < problem_size.H &&
w >= 0 && w < problem_size.W &&
thread_c[n] < problem_size.C) {
element_B[n] = ElementAccumulator(tensor_x.at({N, d, h, w, thread_c[n]}));
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
} // for (Q)
} // for (P)
} // for (Z)
} // for (N)
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int thread_k = k_start + m;
if (thread_k < problem_size.K) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
if (thread_t[n] < problem_size.T &&
thread_r[n] < problem_size.R &&
thread_s[n] < problem_size.S &&
thread_c[n] < problem_size.C) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_dw_in.at({thread_k, thread_t[n], thread_r[n], thread_s[n], thread_c[n]}));
}
tensor_dw_out.at({thread_k, thread_t[n], thread_r[n], thread_s[n], thread_c[n]}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Conv2d Fprop dispatcher - y = fprop(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv2dFprop(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 4; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 16; // shape of a threadblock in units of threads
int const kCtaShapeN = 8; // shape of a threadblock in units of threads
int64_t npq = int64_t(problem_size.N) * problem_size.P * problem_size.Q;
int64_t blocks_m = (npq + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid(uint32_t(blocks_m), (problem_size.K + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN));
kernel::Conv2dFprop<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_x,
tensor_w,
tensor_y_in,
tensor_y_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv3d Fprop dispatcher - y = fprop(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv3dFprop(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 4; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 16; // shape of a threadblock in units of threads
int const kCtaShapeN = 8; // shape of a threadblock in units of threads
int64_t nzpq = int64_t(problem_size.N) * problem_size.Z * problem_size.P * problem_size.Q;
int64_t blocks_m = (nzpq + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid(uint32_t(blocks_m), (problem_size.K + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN));
kernel::Conv3dFprop<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_x,
tensor_w,
tensor_y_in,
tensor_y_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv2d Dgrad dispatcher - dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv2dDgrad(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 16; // shape of a threadblock in units of threads
int const kCtaShapeN = 8; // shape of a threadblock in units of threads
int64_t nhw = int64_t(problem_size.N) * problem_size.H * problem_size.W;
int64_t blocks_m = (nhw + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid(uint32_t(blocks_m), (problem_size.C + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN));
kernel::Conv2dDgrad<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_dy,
tensor_w,
tensor_dx_in,
tensor_dx_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv3d Dgrad dispatcher - dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv3dDgrad(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 16; // shape of a threadblock in units of threads
int const kCtaShapeN = 8; // shape of a threadblock in units of threads
int64_t ndhw = int64_t(problem_size.N) * problem_size.D * problem_size.H * problem_size.W;
int64_t blocks_m = (ndhw + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid(uint32_t(blocks_m), (problem_size.C + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN));
kernel::Conv3dDgrad<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_dy,
tensor_w,
tensor_dx_in,
tensor_dx_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv2d Wgrad dispatcher - dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv2dWgrad(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 8; // shape of a threadblock in units of threads
int const kCtaShapeN = 16; // shape of a threadblock in units of threads
int64_t rsc = int64_t(problem_size.R) * problem_size.S * problem_size.C;
int64_t blocks_n = (rsc + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid((problem_size.K + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM), uint32_t(blocks_n));
kernel::Conv2dWgrad<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_dy,
tensor_x,
tensor_dw_in,
tensor_dw_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv3d Wgrad dispatcher - dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv3dWgrad(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 8; // shape of a threadblock in units of threads
int const kCtaShapeN = 16; // shape of a threadblock in units of threads
int64_t trsc = int64_t(problem_size.T) * problem_size.R * problem_size.S * problem_size.C;
int64_t blocks_n = (trsc + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid((problem_size.K + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM), uint32_t(blocks_n));
kernel::Conv3dWgrad<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_dy,
tensor_x,
tensor_dw_in,
tensor_dw_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Generic 2D convolution targeting Conv2dFprop, Conv2dDgrad, and Conv2dWgrad.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv2d(
conv::Operator convolutional_operator,
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_A,
TensorRef<ElementB, LayoutB> tensor_B,
TensorRef<ElementC, LayoutC> tensor_C,
TensorRef<ElementC, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
switch (convolutional_operator) {
case conv::Operator::kFprop:
return Conv2dFprop<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
break;
case conv::Operator::kDgrad:
return Conv2dDgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
break;
case conv::Operator::kWgrad:
return Conv2dWgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
break;
default: break;
}
return Status::kErrorNotSupported;
}
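/// Usage sketch: a hypothetical helper (not part of the library API) invoking the generic
/// dispatcher above for an Fprop problem with float NHWC tensors and float accumulation.
/// The TensorRef arguments are assumed to reference device memory the caller has already
/// allocated and initialized.
inline Status Conv2dFpropFloatExample(
  conv::Conv2dProblemSize problem_size,
  TensorRef<float, layout::TensorNHWC> tensor_x,
  TensorRef<float, layout::TensorNHWC> tensor_w,
  TensorRef<float, layout::TensorNHWC> tensor_y_in,
  TensorRef<float, layout::TensorNHWC> tensor_y_out,
  float alpha = 1.0f,
  float beta = 0.0f,
  cudaStream_t stream = nullptr) {

  // Element, layout, and compute types are all float / NHWC here; other combinations
  // follow the same pattern by changing the template arguments.
  return Conv2d<
    float, layout::TensorNHWC,   // ElementA, LayoutA (activations)
    float, layout::TensorNHWC,   // ElementB, LayoutB (filters)
    float, layout::TensorNHWC,   // ElementC, LayoutC (source and destination)
    float                        // ElementCompute
  >(
    conv::Operator::kFprop,
    problem_size,
    tensor_x,
    tensor_w,
    tensor_y_in,
    tensor_y_out,
    alpha,
    beta,
    stream);
}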
/// Generic 3D convolution targeting Conv3dFprop, Conv3dDgrad, and Conv3dWgrad.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv3d(
conv::Operator convolutional_operator,
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_A,
TensorRef<ElementB, LayoutB> tensor_B,
TensorRef<ElementC, LayoutC> tensor_C,
TensorRef<ElementC, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
switch (convolutional_operator) {
case conv::Operator::kFprop:
return Conv3dFprop<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
case conv::Operator::kDgrad:
return Conv3dDgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
case conv::Operator::kWgrad:
return Conv3dWgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
default: break;
}
return Status::kErrorNotSupported;
}
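/// Usage sketch: a hypothetical helper (not part of the library API) showing that the 3-D
/// dispatcher above is invoked the same way as its 2-D counterpart, here for an Fprop
/// problem with float NDHWC tensors.
inline Status Conv3dFpropFloatExample(
  conv::Conv3dProblemSize problem_size,
  TensorRef<float, layout::TensorNDHWC> tensor_x,
  TensorRef<float, layout::TensorNDHWC> tensor_w,
  TensorRef<float, layout::TensorNDHWC> tensor_y_in,
  TensorRef<float, layout::TensorNDHWC> tensor_y_out,
  float alpha = 1.0f,
  float beta = 0.0f,
  cudaStream_t stream = nullptr) {

  return Conv3d<
    float, layout::TensorNDHWC,
    float, layout::TensorNDHWC,
    float, layout::TensorNDHWC,
    float
  >(
    conv::Operator::kFprop,
    problem_size,
    tensor_x,
    tensor_w,
    tensor_y_in,
    tensor_y_out,
    alpha,
    beta,
    stream);
}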
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/device/convolution.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/convolution.h",
"repo_id": "tools",
"token_count": 20794
} | 59 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for convolution in host-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include <iostream>
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Forward propagation
////////////////////////////////////////////////////////////////////////////////////////////////////
/// y = conv2d(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ElementD = ElementC,
typename ConvertOp = NumericConverter<ElementD, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv2dFprop(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementD, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
int group_idx = k / (problem_size.K / problem_size.groups);
int channels_per_group = problem_size.C / problem_size.groups;
ElementAccumulator acc = ElementAccumulator();
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int c = 0; c < channels_per_group; ++c) {
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
int h = p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
if (h >= 0 && h < problem_size.H && w >= 0 && w < problem_size.W) {
ElementA a = tensor_x.at({n, h, w, c + group_idx * channels_per_group});
ElementB b = tensor_w.at({k, r, s, c});
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_y_in.at(cutlass::make_Coord(n, p, q, k));
}
tensor_y_out.at(cutlass::make_Coord(n, p, q, k)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
}
}
}
}
}
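/// Illustrative usage sketch (added commentary, not part of the original reference header):
/// drives Conv2dFprop above on a small NHWC float problem. The Conv2dProblemSize constructor
/// taking (input size, filter size, padding, stride, dilation, mode) and TensorNHWC::packed
/// are assumed to be provided by the headers included above; the function name and the chosen
/// extents are purely illustrative.
inline void Conv2dFpropReferenceExample(
    float *ptr_x,    // activations, packed NHWC extent (1, 8, 8, 16)
    float *ptr_w,    // filters,     packed KRSC extent (32, 3, 3, 16)
    float *ptr_y) {  // output,      packed NHWC extent (1, 8, 8, 32)
  using Layout = cutlass::layout::TensorNHWC;
  cutlass::conv::Conv2dProblemSize problem_size(
      {1, 8, 8, 16},     // input size  (NHWC)
      {32, 3, 3, 16},    // filter size (KRSC)
      {1, 1, 1, 1},      // padding
      {1, 1},            // traversal stride
      {1, 1},            // dilation
      cutlass::conv::Mode::kCrossCorrelation);
  cutlass::TensorRef<float, Layout> tensor_x(ptr_x, Layout::packed({1, 8, 8, 16}));
  cutlass::TensorRef<float, Layout> tensor_w(ptr_w, Layout::packed({32, 3, 3, 16}));
  cutlass::TensorRef<float, Layout> tensor_y(ptr_y, Layout::packed({1, 8, 8, 32}));
  // beta == 0, so tensor_y_in is never read and the output ref can be passed in both positions.
  Conv2dFprop<float, Layout, float, Layout, float, Layout, float>(
      problem_size, tensor_x, tensor_w, tensor_y, tensor_y, 1.0f, 0.0f);
}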
/// Depthwise-separable convolution
template <typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ElementD = ElementC,
typename ConvertOp = NumericConverter<ElementD, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>>
void Depsep_Fprop(cutlass::TensorView<ElementA, LayoutA> tensor_A,
cutlass::TensorView<ElementB, LayoutB> tensor_B,
cutlass::TensorView<ElementC, LayoutC> tensor_C,
cutlass::TensorView<ElementD, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta,
cutlass::Tensor4DCoord padding = cutlass::Tensor4DCoord(),
cutlass::Coord<2> conv_stride = cutlass::Coord<2>(),
cutlass::Coord<2> dilation = cutlass::Coord<2>(),
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < tensor_C.extent().n(); ++n) {
for (int p = 0; p < tensor_C.extent().h(); ++p) {
for (int q = 0; q < tensor_C.extent().w(); ++q) {
for (int g = 0; g < tensor_C.extent().c(); ++g) {
ElementAccumulator acc = ElementAccumulator();
for (int r = 0; r < tensor_B.extent().h(); ++r) {
for (int s = 0; s < tensor_B.extent().w(); ++s) {
// input activation H and W
int h = p * conv_stride[0] - padding[0] + r * dilation[0];
int w = q * conv_stride[1] - padding[2] + s * dilation[1];
if (h < tensor_A.extent().h() && h >= 0 && w < tensor_A.extent().w() && w >= 0) {
ElementA a = tensor_A.at(cutlass::make_Coord(n, h, w, g));
ElementB b = (mode == cutlass::conv::Mode::kCrossCorrelation)
? tensor_B.at(cutlass::make_Coord(g, r, s, 0))
: tensor_B.at(cutlass::make_Coord(
g, tensor_B.extent().h() - r - 1, tensor_B.extent().w() - s - 1, 0));
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = tensor_C.at(cutlass::make_Coord(n, p, q, g));
tensor_D.at(cutlass::make_Coord(n, p, q, g)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
}
}
}
}
}
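// Added commentary: Depsep_Fprop implements the depthwise stage of a depthwise-separable
// convolution. Output channel g reads only input channel g, and the filter tensor is indexed
// as (g, r, s, 0), i.e. one 2D filter per channel. The padding coordinate supplies pad_h in
// element [0] and pad_w in element [2], matching the index math above.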
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Dgrad / Deconv
////////////////////////////////////////////////////////////////////////////////////////////////////
/// dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ElementD = ElementC,
typename ConvertOp = NumericConverter<ElementD, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv2dDgrad(
cutlass::conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementD, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta,
bool is_deconv = false) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < problem_size.N; ++n) {
for (int h = 0; h < problem_size.H; ++h) {
for (int w = 0; w < problem_size.W; ++w) {
for (int c = 0; c < problem_size.C; ++c) {
ElementAccumulator acc = ElementAccumulator();
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int k = 0; k < problem_size.K; ++k) {
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
int p = h + problem_size.pad_h - filter_r * problem_size.dilation_h;
int q = w + problem_size.pad_w - filter_s * problem_size.dilation_w;
if (p >= 0 && (p % problem_size.stride_h) == 0 &&
q >= 0 && (q % problem_size.stride_w) == 0) {
p = p / problem_size.stride_h;
q = q / problem_size.stride_w;
#if 0
std::cout << "row:"
<< n * problem_size.H * problem_size.W +
h * problem_size.W +
w << " "
<< "n, p, q: ("
<< n << ", "
<< p << ", "
<< q << ") * "
<< "r, s: ("
<< r << ", "
<< s << ") ["
<< ((p < problem_size.P && q < problem_size.Q) ? "true":"false") << "]"
<< std::endl;
#endif
if (p < problem_size.P && q < problem_size.Q) {
ElementA a = tensor_dy.at(cutlass::make_Coord(n, p, q, k));
ElementB b = is_deconv ? tensor_w.at(cutlass::make_Coord(c, r, s, k))
: tensor_w.at(cutlass::make_Coord(k, r, s, c));
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
} // for (K)
} // for (S)
} // for (R)
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_dx_in.at(cutlass::make_Coord(n, h, w, c));
}
tensor_dx_out.at(cutlass::make_Coord(n, h, w, c)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
} // for (C)
} // for (W)
} // for (H)
} // for (N)
}
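// Added commentary on the index math above: forward propagation maps output position (p, q)
// and filter tap (filter_r, filter_s) to input position
//   h = p * stride_h - pad_h + filter_r * dilation_h
//   w = q * stride_w - pad_w + filter_s * dilation_w
// Inverting this for dgrad gives p = (h + pad_h - filter_r * dilation_h) / stride_h, and
// likewise for q. A term contributes only when the numerator is non-negative, divisible by
// the stride, and the resulting (p, q) lies within (P, Q): exactly the three conditions
// checked in the inner loop.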
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Wgrad
////////////////////////////////////////////////////////////////////////////////////////////////////
/// dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ElementD = ElementC,
typename ConvertOp = NumericConverter<ElementD, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv2dWgrad(
cutlass::conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementD, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta) {
InnerProductOp inner_product_op;
ConvertOp convert_op;
// Apply MMA and accumulate ElementAccumulator
for (int k = 0; k < problem_size.K; ++k) {
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int c = 0; c < problem_size.C; ++c) {
ElementAccumulator acc = ElementAccumulator();
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
cutlass::Tensor4DCoord b_coord;
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
b_coord = make_Coord(
n,
p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h,
q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w,
c);
if (b_coord.h() < problem_size.H && b_coord.h() >= 0 &&
b_coord.w() < problem_size.W && b_coord.w() >= 0) {
ElementAccumulator a = ElementAccumulator(tensor_dy.at(cutlass::make_Coord(n, p, q, k)));
ElementAccumulator b = ElementAccumulator(tensor_x.at(b_coord));
acc = inner_product_op(a, b, acc);
}
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_dw_in.at(cutlass::make_Coord(k, r, s, c));
}
tensor_dw_out.at(cutlass::make_Coord(k, r, s, c)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
} // for (C)
} // for (S)
} // for (R)
} // for (K)
}
/// Generic 2D convolution targeting Conv2dFprop, Conv2dDgrad, and Conv2dWgrad.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ElementD = ElementC,
typename ConvertOp = NumericConverter<ElementD, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv2d(
conv::Operator convolutional_operator,
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_A,
TensorRef<ElementB, LayoutB> tensor_B,
TensorRef<ElementC, LayoutC> tensor_C,
TensorRef<ElementD, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta) {
switch (convolutional_operator) {
case conv::Operator::kFprop:
Conv2dFprop<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ElementD,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
case conv::Operator::kDeconv:
case conv::Operator::kDgrad:
Conv2dDgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ElementD,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, (convolutional_operator == conv::Operator::kDeconv));
break;
case conv::Operator::kWgrad:
Conv2dWgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ElementD,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
default:
break;
}
}
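// Added commentary: kDeconv shares the Conv2dDgrad reference above, with is_deconv = true
// switching the filter indexing from (k, r, s, c) to (c, r, s, k). kWgrad reduces over
// (N, P, Q) for each filter position (k, r, s, c) instead of over (R, S, C) for each output
// position. For example, Conv2d(conv::Operator::kWgrad, problem_size, tensor_dy, tensor_x,
// tensor_dw, tensor_dw, alpha, beta) accumulates the weight gradient into tensor_dw.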
////////////////////////////////////////////////////////////////////////////////////////////////////
/// 3D convolution
////////////////////////////////////////////////////////////////////////////////////////////////////
/// y = conv3d(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv3dFprop(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < problem_size.N; ++n) {
for (int z = 0; z < problem_size.Z; ++z) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
ElementAccumulator acc = ElementAccumulator();
for (int t = 0; t < problem_size.T; ++t) {
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int c = 0; c < problem_size.C; ++c) {
int filter_t = t;
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - t;
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
int d = z * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d;
int h = p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
if (d >= 0 && d < problem_size.D &&
h >=0 && h < problem_size.H &&
w >= 0 && w < problem_size.W) {
ElementA a = tensor_x.at({n, d, h, w, c});
ElementB b = tensor_w.at({k, t, r, s, c});
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_y_in.at(cutlass::make_Coord(n, z, p, q, k));
}
tensor_y_out.at(cutlass::make_Coord(n, z, p, q, k)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Dgrad / Deconv
////////////////////////////////////////////////////////////////////////////////////////////////////
/// dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv3dDgrad(
cutlass::conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta,
bool is_deconv = false) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < problem_size.N; ++n) {
for (int d = 0; d < problem_size.D; ++d) {
for (int h = 0; h < problem_size.H; ++h) {
for (int w = 0; w < problem_size.W; ++w) {
for (int c = 0; c < problem_size.C; ++c) {
ElementAccumulator acc = ElementAccumulator();
for (int t = 0; t < problem_size.T; ++t) {
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int k = 0; k < problem_size.K; ++k) {
int filter_t = t;
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - t;
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
int z = d + problem_size.pad_d - filter_t * problem_size.dilation_d;
int p = h + problem_size.pad_h - filter_r * problem_size.dilation_h;
int q = w + problem_size.pad_w - filter_s * problem_size.dilation_w;
if (z >= 0 && (z % problem_size.stride_d) == 0 &&
p >= 0 && (p % problem_size.stride_h) == 0 &&
q >= 0 && (q % problem_size.stride_w) == 0) {
z = z / problem_size.stride_d;
p = p / problem_size.stride_h;
q = q / problem_size.stride_w;
if (z < problem_size.Z && p < problem_size.P && q < problem_size.Q) {
ElementA a = tensor_dy.at(cutlass::make_Coord(n, z, p, q, k));
ElementB b = is_deconv ? tensor_w.at(cutlass::make_Coord(c, t, r, s, k))
: tensor_w.at(cutlass::make_Coord(k, t, r, s, c));
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
} // for (K)
} // for (S)
} // for (R)
} // for (T)
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_dx_in.at(cutlass::make_Coord(n, d, h, w, c));
}
tensor_dx_out.at(cutlass::make_Coord(n, d, h, w, c)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
} // for (C)
} // for (W)
} // for (H)
} // for (D)
} // for (N)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Wgrad
////////////////////////////////////////////////////////////////////////////////////////////////////
/// dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv3dWgrad(
cutlass::conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta) {
InnerProductOp inner_product_op;
ConvertOp convert_op;
// Apply MMA and accumulate ElementAccumulator
for (int k = 0; k < problem_size.K; ++k) {
for (int t = 0; t < problem_size.T; ++t) {
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int c = 0; c < problem_size.C; ++c) {
ElementAccumulator acc = ElementAccumulator();
for (int n = 0; n < problem_size.N; ++n) {
for (int z = 0; z < problem_size.Z; ++z) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
int filter_t = t;
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - t;
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
Tensor5DCoord b_coord = make_Coord(
n,
z * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d,
p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h,
q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w,
c);
if (b_coord.d() < problem_size.D && b_coord.d() >= 0 &&
b_coord.h() < problem_size.H && b_coord.h() >= 0 &&
b_coord.w() < problem_size.W && b_coord.w() >= 0) {
ElementAccumulator a = ElementAccumulator(tensor_dy.at(cutlass::make_Coord(n, z, p, q, k)));
ElementAccumulator b = ElementAccumulator(tensor_x.at(b_coord));
acc = inner_product_op(a, b, acc);
}
}
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_dw_in.at(cutlass::make_Coord(k, t, r, s, c));
}
tensor_dw_out.at(cutlass::make_Coord(k, t, r, s, c)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
} // for (C)
} // for (S)
} // for (R)
} // for (T)
} // for (K)
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Generic 3D convolution targeting Conv3dFprop, Conv3dDgrad, and Conv3dWgrad.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv3d(
conv::Operator convolutional_operator,
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_A,
TensorRef<ElementB, LayoutB> tensor_B,
TensorRef<ElementC, LayoutC> tensor_C,
TensorRef<ElementC, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta) {
switch (convolutional_operator) {
case conv::Operator::kFprop:
Conv3dFprop<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
case conv::Operator::kDeconv:
case conv::Operator::kDgrad:
Conv3dDgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, (convolutional_operator == conv::Operator::kDeconv));
break;
case conv::Operator::kWgrad:
Conv3dWgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
default:
break;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/host/convolution.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/convolution.h",
"repo_id": "tools",
"token_count": 13227
} | 60 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Provides several functions for filling tensors with data.
*/
#pragma once
// Standard Library includes
#include <utility>
#include <cstdlib>
#include <cmath>
// Cute includes
#include "cute/tensor.hpp"
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/quaternion.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Uniform and procedural tensor fills
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with a scalar element
template <typename Tensor>
void TensorFill(Tensor dst, typename Tensor::value_type element) {
for (int64_t idx = 0; idx < cute::size(dst); ++idx) {
dst(idx) = element;
}
}
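/// Illustrative usage sketch (added commentary, not part of the original header): wraps a
/// caller-provided host buffer of M * N floats in a non-owning cute tensor and fills it.
/// The assumption here is that cute::make_tensor over a raw host pointer and a bare shape
/// produces a compact column-major view; the function name is purely illustrative.
inline void TensorFillExample(float *ptr, int M, int N) {
  auto tensor = cute::make_tensor(ptr, cute::make_shape(M, N));
  TensorFill(tensor, 0.5f);
}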
/// Fills a tensor with sequential values: logical element i receives the offset computed by its layout, layout(i)
template <typename Tensor>
void TensorFillSequential(Tensor dst) {
auto layout = dst.layout();
for (int64_t idx = 0; idx < cute::size(dst); ++idx) {
dst(idx) = layout(idx);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Random uniform values
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element>
struct RandomUniformFunc {
using Real = typename RealType<Element>::Type;
uint64_t seed;
double range;
double min;
int int_scale;
//
// Methods
//
RandomUniformFunc(
uint64_t seed_ = 0,
double max = 1,
double min_ = 0,
int int_scale_ = -1
):
seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Element operator()() const {
double rnd = double(std::rand()) / double(RAND_MAX);
rnd = min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
Element result;
if (int_scale >= 0) {
rnd = double(int64_t(rnd * double(1 << int_scale))) / double(1 << int_scale);
result = static_cast<Element>(Real(rnd));
}
else {
result = static_cast<Element>(Real(rnd));
}
return result;
}
};
/// Partial specialization for initializing a complex value.
template <typename Element>
struct RandomUniformFunc<complex<Element> > {
using Real = typename RealType<Element>::Type;
uint64_t seed;
double range;
double min;
int int_scale;
//
// Methods
//
RandomUniformFunc(
uint64_t seed_ = 0,
double max = 1,
double min_ = 0,
int int_scale_ = -1
):
seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
complex<Element> operator()() const {
Element reals[2];
for (int i = 0; i < 2; ++i) {
double rnd = double(std::rand()) / double(RAND_MAX);
rnd = min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
if (int_scale >= 0) {
rnd = double(int(rnd * double(1 << int_scale)));
reals[i] = from_real<Element>(Real(rnd / double(1 << int_scale)));
}
else {
reals[i] = from_real<Element>(Real(rnd));
}
}
return complex<Element>(reals[0], reals[1]);
}
};
/// Partial specialization for initializing a Quaternion value.
template <typename Element>
struct RandomUniformFunc<Quaternion<Element> > {
using Real = typename RealType<Element>::Type;
uint64_t seed;
double range;
double min;
int int_scale;
//
// Methods
//
RandomUniformFunc(
uint64_t seed_ = 0,
double max = 1,
double min_ = 0,
int int_scale_ = -1
):
seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Quaternion<Element> operator()() const {
Element reals[4];
for (int i = 0; i < 4; ++i) {
double rnd = double(std::rand()) / double(RAND_MAX);
rnd = min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
if (int_scale >= 0) {
rnd = double(int(rnd * double(1 << int_scale)));
reals[i] = from_real<Element>(Real(rnd / double(1 << int_scale)));
}
else {
reals[i] = from_real<Element>(Real(rnd));
}
}
return make_Quaternion(reals[0], reals[1], reals[2], reals[3]);
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a uniform random distribution.
template <typename Tensor> ///< Tensor object
void TensorFillRandomUniform(
Tensor dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<typename Tensor::value_type> random_func(seed, max, min, bits);
for (int64_t idx = 0; idx < cute::size(dst); ++idx) {
dst(idx) = random_func();
}
}
/// Fills a block with random values with a uniform random distribution.
template <
typename Element ///< Element type
>
void BlockFillRandomUniform(
Element *ptr,
size_t capacity,
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<Element> random_func(seed, max, min, bits);
for (size_t i = 0; i < capacity; ++i) {
ptr[i] = random_func();
}
}
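/// Illustrative usage sketch (added commentary, not part of the original header): fills a raw
/// host buffer with values drawn uniformly from [-2, 2]; bits is left at its default of -1 so
/// the values are not quantized. The function name is purely illustrative.
inline void BlockFillRandomUniformExample(float *ptr, size_t capacity, uint64_t seed) {
  BlockFillRandomUniform(ptr, capacity, seed, /* max = */ 2.0, /* min = */ -2.0);
}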
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Random Gaussian
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element>
struct RandomGaussianFunc {
uint64_t seed;
double mean;
double stddev;
int int_scale;
double pi;
//
// Methods
//
RandomGaussianFunc(
uint64_t seed_ = 0,
double mean_ = 0,
double stddev_ = 1,
int int_scale_ = -1
):
seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Element operator()() const {
// Box-Muller transform to generate random numbers with Normal distribution
double u1 = double(std::rand()) / double(RAND_MAX);
double u2 = double(std::rand()) / double(RAND_MAX);
// Compute Gaussian random value
double rnd = std::sqrt(-2 * std::log(u1)) * std::cos(2 * pi * u2);
rnd = mean + stddev * rnd;
// Scale and convert final result
Element result;
if (int_scale >= 0) {
rnd = double(int64_t(rnd * double(1 << int_scale))) / double(1 << int_scale);
result = static_cast<Element>(rnd);
}
else {
result = static_cast<Element>(rnd);
}
return result;
}
};
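// Note on the functor above: given two independent uniform samples u1 and u2 in [0, 1], the
// Box-Muller transform z = sqrt(-2 * ln(u1)) * cos(2 * pi * u2) yields a standard normal
// sample, which is then shifted and scaled to mean + stddev * z. Only the cosine member of
// the Box-Muller pair is generated per call.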
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a Gaussian distribution.
template <
typename Tensor
>
void TensorFillRandomGaussian(
Tensor dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomGaussianFunc<typename Tensor::value_type> random_func(seed, mean, stddev, bits);
for (int64_t idx = 0; idx < cute::size(dst); ++idx) {
dst(idx) = random_func();
}
}
/// Fills a block with random values with a Gaussian distribution.
template <
typename Element ///< Element type
>
void BlockFillRandomGaussian(
Element *ptr, ///< destination buffer
size_t capacity, ///< number of elements
uint64_t seed, ///< seed for RNG
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomGaussianFunc<Element> random_func(seed, mean, stddev, bits);
for (size_t i = 0; i < capacity; ++i) {
ptr[i] = random_func();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of data with sequential elements
template <
typename Element
>
void BlockFillSequential(
Element *ptr,
int64_t capacity,
Element v = Element(1),
Element s = Element(0)) {
int i = 0;
while (i < capacity) {
    ptr[i] = s;
    s = Element(s + v);
    ++i;
}
}
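// For example, with the defaults (v = 1, s = 0) the first four elements become 0, 1, 2, 3;
// with v = Element(2) and s = Element(5) they become 5, 7, 9, 11.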
/// Fills a block of data with sequential elements reduced modulo mod
template <
typename Element
>
void BlockFillSequentialModN(
Element *ptr,
int64_t capacity,
int64_t mod,
int64_t v = int64_t(1),
int64_t s = int64_t(0)) {
int i = 0;
while (i < capacity) {
    ptr[i] = static_cast<Element>(int32_t(s % mod));
    s += v;
    ++i;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/host/tensor_fill.hpp/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/tensor_fill.hpp",
"repo_id": "tools",
"token_count": 4719
} | 61 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS layout visualization tool
*/
#include <map>
#include <iostream>
#include <iomanip>
#include <memory>
#include <cutlass/cutlass.h>
#include "options.h"
#include "register_layout.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
std::map<std::string, std::unique_ptr<VisualizeLayoutBase> > layouts;
/////////////////////////////////////////////////////////////////////////////////////////////////
void print_usage(std::ostream &out) {
out << "03_visualize_layout <layout> [options]"
<< "\n\n"
<< " Layouts:\n";
for (auto const & layout : layouts) {
out << " " << layout.first << std::string(46 - layout.first.size(), ' ');
layout.second->print_help(out);
out << "\n";
}
out << "\n";
Options::print_usage(out);
out << "\nExamples:\n\n"
<< "$ 03_visualize_layout RowMajor --extent=16,16\n"
<< "$ 03_visualize_layout \"ColumnMajorInterleaved<4>\" --extent=32,8 "
"--output-shape=16 --vectorize=4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<4,64>\" "
"--extent=64,64 --vectorize=32 --output-shape=256,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<4,128>\" "
"--extent=128,32 --vectorize=32 --output-shape=256,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<4,256>\" "
"--extent=256,16 --vectorize=32 --output-shape=256,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<8,32>\" "
"--extent=32,64 --vectorize=16 --output-shape=128,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<8,64>\" "
"--extent=64,32 --vectorize=16 --output-shape=128,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<8,128>\" "
"--extent=128,16 --vectorize=16 --output-shape=128,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<16,32>\" "
"--extent=32,32 --vectorize=8 --output-shape=64,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<16,64>\" "
"--extent=64,16 --vectorize=8 --output-shape=64,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<32,16>\" "
"--extent=16,32 --vectorize=4 --output-shape=32,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicand<32,32>\" "
"--extent=32,16 --vectorize=4 --output-shape=32,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicandCongruous<32,32>\" "
"--extent=32,16 --vectorize=4 --output-shape=32,4\n"
<< "$ 03_visualize_layout \"TensorOpMultiplicandCongruous<64, 16>\" "
"--extent=16,16 --vectorize=2 --output-shape=16,4\n"
<< "$ 03_visualize_layout \"VoltaTensorOpMultiplicandCrosswise<16,32>\" "
"--extent=32,64 --vectorize=4 --output-shape=64,4\n"
<< "$ 03_visualize_layout \"VoltaTensorOpMultiplicandCongruous<16>\" "
"--extent=64,32 --vectorize=8 --output-shape=64,4\n";
out << std::endl;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point
int main(int argc, char const *arg[]) {
RegisterLayouts(layouts);
if (argc == 1 || (std::string(arg[1]) == "-h" || std::string(arg[1]) == "--help")) {
print_usage(std::cout);
return 0;
}
// parse command line, skipping layout name
cutlass::CommandLine cmd_line(argc - 1, arg + 1);
Options options(cmd_line);
if (options.help) {
print_usage(std::cout);
return 0;
}
if (!options.good) {
return -1;
}
std::string layout_name = arg[1];
auto layout_it = layouts.find(layout_name);
if (layout_it == layouts.end()) {
std::cerr << "Layout '" << layout_name << "' not supported." << std::endl;
return -1;
}
bool passed = layout_it->second->visualize(options);
if (!passed) {
return -1;
}
layout_it->second->print_csv(std::cout);
cudaFree(0); // Ensure CUDA is available.
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/03_visualize_layout/visualize_layout.cpp/0 | {
"file_path": "examples/03_visualize_layout/visualize_layout.cpp",
"repo_id": "examples",
"token_count": 2052
} | 0 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined Implicit GEMM kernel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/semaphore.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/epilogue/threadblock/output_iterator_parameter.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
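/// Added commentary: threadblock-scoped kernel that fuses two back-to-back convolutions.
/// The first convolution (problem_size_0, operands A0/B0) is computed by B2bMma and its
/// accumulator is kept on chip; EpilogueOutputOp0, together with the Scale0/Bias0 vectors,
/// transforms that intermediate result, which immediately becomes the A operand of the
/// second convolution against B1. Only the second result passes through the full Epilogue
/// and is written out through iterator_D1.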
template <
typename B2bMma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad)
typename ConvProblemSize_ = Conv2dProblemSize ///! Convolutional operator on 2D or 3D problem
>
struct B2bImplicitGemmConvolution {
using B2bMma = B2bMma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp0 = typename B2bMma::OutputOp;
using EpilogueOutputOp1 = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static Operator const kConvolutionalOperator = ConvOperator;
using ElementA = typename B2bMma::IteratorA0::Element;
using LayoutA = typename B2bMma::IteratorA0::Layout;
using ElementB = typename B2bMma::IteratorB0::Element;
using LayoutB = typename B2bMma::IteratorB0::Layout;
using ElementC = typename EpilogueOutputOp1::ElementOutput;
/// Set output tensor C layout
using LayoutC = LayoutA;
using ElementAccumulator = typename EpilogueOutputOp0::ElementAccumulator;
using ElementCompute = typename EpilogueOutputOp0::ElementCompute;
/// Scale and Bias
using ElementScaleBias = typename B2bMma::IteratorAccumulatorScaleBias::Element;
using LayoutScaleBias = typename B2bMma::IteratorAccumulatorScaleBias::Layout;
using WarpMmaOperator0 = typename B2bMma::Policy0::Operator;
using WarpMmaOperator1 = typename B2bMma::Policy1::Operator;
using ArchMmaOperator = typename WarpMmaOperator0::ArchMmaOperator;
using MathOperator = typename ArchMmaOperator::Operator;
using OperatorClass = typename WarpMmaOperator0::OperatorClass;
using ArchTag = typename WarpMmaOperator0::ArchTag;
using ThreadblockShape0 = typename B2bMma::Shape0;
using ThreadblockShape1 = typename B2bMma::Shape1;
using WarpShape0 = typename WarpMmaOperator0::Shape;
using WarpShape1 = typename WarpMmaOperator1::Shape;
using InstructionShape = typename ArchMmaOperator::Shape;
static int const kStages = B2bMma::kStages;
static IteratorAlgorithm const kIteratorAlgorithm = B2bMma::IteratorA0::kIteratorAlgorithm;
/// Warp count (concept: GemmShape)
using WarpCount0 = typename B2bMma::WarpCount0;
static int const kThreadCount = 32 * WarpCount0::kCount;
using TensorRefA0 = typename B2bMma::IteratorA0::TensorRef;
using TensorRefB0 = typename B2bMma::IteratorB0::TensorRef;
using TensorRefScaleBias0 = typename B2bMma::IteratorAccumulatorScaleBias::TensorRef;
using TensorRefB1 = typename B2bMma::IteratorB1::TensorRef;
using TensorRefC = cutlass::TensorRef<ElementC, LayoutC>;
/// Check that iterator A and iterator B have the same convolution dimension and
// set device::B2bImplicitGemmConvolution::kConvDim
static_assert(B2bMma::IteratorA0::kConvDim == B2bMma::IteratorB0::kConvDim,
"Convolution on different dimensions is not supported");
static int const kConvDim = B2bMma::IteratorA0::kConvDim;
/// Conv dimension and problem size structure (Conv2d or Conv3d)
using ConvProblemSize = ConvProblemSize_;
/// Wgrad C stride idx for implicit gemm algorithm
// Conv2d row-major matrix C (KxRSC)
// Conv3d row-major matrix C (KxTRSC)
static int const kWgradCStrideIdx =
cutlass::platform::is_same<LayoutC, cutlass::layout::TensorNHWC>::value ? 2 : 3;
/// This chooses the appropriate stride element of the C tensor.
static int const kTensorCStrideIdx =
(kConvolutionalOperator == conv::Operator::kWgrad ? kWgradCStrideIdx : 0);
//
//
//
using ConvOutputIteratorParameter = epilogue::threadblock::ConvOutputIteratorParameter<
LayoutC,
typename Epilogue::OutputTileIterator::Layout,
TensorRefC,
ConvOperator,
ConvProblemSize
>;
/// Argument structure
struct Arguments {
//
// Data members
//
ConvProblemSize problem_size_0;
ConvProblemSize problem_size_1;
TensorRefA0 ref_A0;
TensorRefB0 ref_B0;
TensorRefC ref_C0;
TensorRefScaleBias0 ref_Scale0;
TensorRefScaleBias0 ref_Bias0;
TensorRefB1 ref_B1;
TensorRefC ref_C1;
TensorRefC ref_D1;
typename EpilogueOutputOp0::Params output_op_0;
typename EpilogueOutputOp1::Params output_op_1;
SplitKMode split_k_mode;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments() { }
CUTLASS_HOST_DEVICE
Arguments(
ConvProblemSize const & problem_size_0,
ConvProblemSize const & problem_size_1
):
problem_size_0(problem_size_0),
problem_size_1(problem_size_1) { }
CUTLASS_HOST_DEVICE
Arguments(
ConvProblemSize const & problem_size_0,
ConvProblemSize const & problem_size_1,
TensorRefA0 const & ref_A0,
TensorRefB0 const & ref_B0,
TensorRefC const & ref_C0,
TensorRefScaleBias0 const & ref_Scale0,
TensorRefScaleBias0 const & ref_Bias0,
TensorRefB1 const & ref_B1,
TensorRefC const & ref_C1,
TensorRefC const & ref_D1,
typename EpilogueOutputOp0::Params const & output_op_0,
typename EpilogueOutputOp1::Params const & output_op_1,
SplitKMode const & split_k_mode = SplitKMode::kSerial
):
problem_size_0(problem_size_0),
problem_size_1(problem_size_1),
ref_A0(ref_A0),
ref_B0(ref_B0),
ref_C0(ref_C0),
ref_Scale0(ref_Scale0),
ref_Bias0(ref_Bias0),
ref_B1(ref_B1),
ref_C1(ref_C1),
ref_D1(ref_D1),
output_op_0(output_op_0),
output_op_1(output_op_1),
split_k_mode(split_k_mode)
{
}
};
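// Added commentary on Arguments: ref_A0 and ref_B0 are the activation and filter operands of
// the first convolution (problem_size_0); ref_Scale0 and ref_Bias0 are per-output-channel
// vectors of extent {1, problem_size_0.K} consumed together with output_op_0 inside B2bMma;
// ref_B1 is the filter of the second convolution (problem_size_1); ref_C1 and ref_D1 are the
// source and destination tensors of the final epilogue.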
/// Parameters structure
struct Params {
ConvProblemSize problem_size_0;
ConvProblemSize problem_size_1;
cutlass::gemm::GemmCoord grid_tiled_shape;
gemm::GemmCoord implicit_gemm_problem_size_0;
gemm::GemmCoord implicit_gemm_problem_size_1;
int swizzle_log_tile;
int gemm_k_iterations_0;
int gemm_k_iterations_1;
typename B2bMma::IteratorA0::Params iterator_A0;
typename B2bMma::IteratorA0::Element const *ptr_A0;
typename B2bMma::IteratorB0::Params iterator_B0;
typename B2bMma::IteratorB0::Element const *ptr_B0;
typename Epilogue::OutputTileIterator::Params iterator_C0;
typename Epilogue::OutputTileIterator::Element *ptr_C0;
typename B2bMma::IteratorAccumulatorScaleBias::Element *ptr_Scale0;
typename B2bMma::IteratorAccumulatorScaleBias::Element *ptr_Bias0;
typename B2bMma::IteratorB1::Params iterator_B1;
typename B2bMma::IteratorB1::Element const *ptr_B1;
typename Epilogue::OutputTileIterator::Params iterator_C1;
typename Epilogue::OutputTileIterator::Element *ptr_C1;
typename Epilogue::OutputTileIterator::Params iterator_D1;
typename Epilogue::OutputTileIterator::Element *ptr_D1;
typename EpilogueOutputOp0::Params output_op_0;
typename EpilogueOutputOp1::Params output_op_1;
int *semaphore;
SplitKMode split_k_mode;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(): swizzle_log_tile(0), gemm_k_iterations_0(0), gemm_k_iterations_1(0) { }
///
CUTLASS_HOST_DEVICE
Params(
Arguments const &args,
int *semaphore = nullptr
):
problem_size_0(args.problem_size_0),
problem_size_1(args.problem_size_1),
implicit_gemm_problem_size_0(cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size_0)),
implicit_gemm_problem_size_1(cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size_1)),
iterator_A0(B2bMma::IteratorA0::getParams(args.problem_size_0, args.ref_A0.layout())),
ptr_A0(args.ref_A0.data()),
iterator_B0(args.problem_size_0, args.ref_B0.layout()),
ptr_B0(args.ref_B0.data()),
iterator_C0(ConvOutputIteratorParameter::layout(args.ref_C0)),
ptr_C0(args.ref_C0.data()),
ptr_Scale0(args.ref_Scale0.data()),
ptr_Bias0(args.ref_Bias0.data()),
iterator_B1(args.problem_size_1, args.ref_B1.layout()),
ptr_B1(args.ref_B1.data()),
iterator_C1(ConvOutputIteratorParameter::layout(args.ref_C1)),
ptr_C1(args.ref_C1.data()),
iterator_D1(ConvOutputIteratorParameter::layout(args.ref_D1)),
ptr_D1(args.ref_D1.data()),
output_op_0(args.output_op_0),
output_op_1(args.output_op_1),
semaphore(semaphore),
split_k_mode(args.split_k_mode)
{
gemm_k_iterations_0 = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape0::kK, args.problem_size_0);
gemm_k_iterations_1 = implicit_gemm_k_iterations(kConvolutionalOperator, ThreadblockShape1::kK, args.problem_size_1);
ThreadblockSwizzle threadblock_swizzle;
grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
implicit_gemm_problem_size_0,
{ThreadblockShape0::kM, ThreadblockShape0::kN, ThreadblockShape0::kK},
args.problem_size_0.split_k_slices);
swizzle_log_tile = ThreadblockSwizzle().get_log_tile(grid_tiled_shape);
}
};
/// Shared memory storage structure
union SharedStorage {
typename B2bMma::B2bMmaSharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
B2bImplicitGemmConvolution() { }
/// Executes one ImplicitGEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_idx =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_idx.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_idx.n()) {
return;
}
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename B2bMma::IteratorA0 iterator_A0(
params.iterator_A0,
params.problem_size_0,
params.ptr_A0,
thread_idx,
MatrixCoord(
threadblock_tile_idx.m() * B2bMma::Shape0::kM,
threadblock_tile_idx.k() * B2bMma::Shape0::kK
)
);
typename B2bMma::IteratorB0 iterator_B0(
params.iterator_B0,
params.problem_size_0,
params.ptr_B0,
thread_idx,
MatrixCoord(
threadblock_tile_idx.k() * B2bMma::Shape0::kK,
threadblock_tile_idx.n() * B2bMma::Shape0::kN
)
);
typename B2bMma::IteratorB1 iterator_B1(
params.iterator_B1,
params.problem_size_1,
params.ptr_B1,
thread_idx,
MatrixCoord(
threadblock_tile_idx.k() * B2bMma::Shape1::kK,
threadblock_tile_idx.n() * B2bMma::Shape1::kN
)
);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
// Construct iterators to accumulator scale/bias vector
typename B2bMma::IteratorAccumulatorScaleBias iterator_Scale0(
params.ptr_Scale0,
{1, params.problem_size_0.K},
thread_idx,
warp_idx,
MatrixCoord(
0, threadblock_tile_idx.n() * B2bMma::Shape0::kN
)
);
typename B2bMma::IteratorAccumulatorScaleBias iterator_Bias0(
params.ptr_Bias0,
{1, params.problem_size_0.K},
thread_idx,
warp_idx,
MatrixCoord(
0, threadblock_tile_idx.n() * B2bMma::Shape0::kN
)
);
//
// Main loop
//
EpilogueOutputOp0 output_op_0(params.output_op_0);
// Construct thread-scoped matrix multiply
B2bMma b2bMma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename B2bMma::FragmentC0 src_accum;
typename B2bMma::FragmentC1 accumulators;
src_accum.clear();
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
b2bMma(params.gemm_k_iterations_0, accumulators, iterator_A0, iterator_B0,
iterator_Scale0, iterator_Bias0, iterator_B1, src_accum, output_op_0);
//
// Epilogue
//
EpilogueOutputOp1 output_op_1(params.output_op_1);
// Construct the semaphore.
int block_idx = threadblock_tile_idx.m() + threadblock_tile_idx.n() * params.grid_tiled_shape.m();
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// Compute logical position within grid
threadblock_tile_idx =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// If performing a reduction via split-K, fetch the initial synchronization
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op_1.set_k_partition(threadblock_tile_idx.k(), params.grid_tiled_shape.k());
}
MatrixCoord threadblock_offset(
threadblock_tile_idx.m() * B2bMma::Shape1::kM,
threadblock_tile_idx.n() * B2bMma::Shape1::kN
);
// Tile iterator writing to destination tensor
typename Epilogue::OutputTileIterator iterator_D1(
params.iterator_D1,
params.ptr_D1,
ConvOutputIteratorParameter::extent(params.problem_size_1),
thread_idx,
threadblock_offset
);
// Tile iterator reading from source accumulator tensor
typename Epilogue::OutputTileIterator iterator_C1(
params.iterator_C1,
params.ptr_C1,
ConvOutputIteratorParameter::extent(params.problem_size_1),
thread_idx,
threadblock_offset
);
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_idx.k()) {
iterator_C1 = iterator_D1;
}
semaphore.wait(threadblock_tile_idx.k());
__threadfence();
}
// Each split-k-slice writes to a unique tensor location
else if (params.split_k_mode == SplitKMode::kParallel) {
iterator_D1.add_pointer_offset(threadblock_tile_idx.k() *
cutlass::conv::implicit_gemm_tensor_c_size(ConvOperator, params.problem_size_1));
}
// Run efficient epilogue
epilogue(output_op_1, iterator_D1, accumulators, iterator_C1);
//
// Release the semaphore
//
if (params.split_k_mode == SplitKMode::kSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_idx.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_idx.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/kernel/b2b_implicit_gemm_convolution.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/kernel/b2b_implicit_gemm_convolution.h",
"repo_id": "examples",
"token_count": 7161
} | 1 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "threadblock/b2b_mma_base.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape0_,
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape1_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy0_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy1_,
/// Shared Memory Accumulator Iterator
typename SmemAccumulatorIterator0_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class B2bMmaBaseSmemAccumulator :
public B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages> {
public:
///< Base class
using Base = B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape0 = Shape0_;
using Shape1 = Shape1_;
///< Policy describing tuning details
using Policy0 = Policy0_;
using Policy1 = Policy1_;
using SmemAccumulatorIterator0 = SmemAccumulatorIterator0_;
//
// Nested structs
//
/// Shared storage object needed by accumulator
template<
typename Shape_,
typename Element_,
typename Layout_,
typename Padding_
>
class AccumulatorSharedStorage {
public:
//
// Type definitions
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using Padding = Padding_;
/// Tensor reference to the accumulator
using TensorRefAccum = TensorRef<Element, Layout>;
/// Shape of the accumulator matrix in shared memory
using ShapeAccum = MatrixShape<Shape::kM + Padding::kRow,
Shape::kN + Padding::kColumn>;
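    // (Illustrative note, not in the original file: the extra Padding rows/columns are
    // presumably added so that accesses to the shared-memory accumulator tile avoid
    // bank conflicts when it is written and later re-read.)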
public:
//
// Data members
//
/// Buffer for accumulator
AlignedBuffer<Element, ShapeAccum::kCount> accum;
public:
//
// Methods
//
/// Returns a layout object for the Accum matrix
CUTLASS_DEVICE
static Layout LayoutAccum() {
return Layout::packed({ShapeAccum::kRow, ShapeAccum::kColumn});
}
/// Returns a TensorRef to the Accumulator
CUTLASS_HOST_DEVICE
TensorRefAccum accum_ref() {
return TensorRefAccum{accum.data(), LayoutAccum()};
}
};
using AccumulatorSharedStorage0 = AccumulatorSharedStorage<
Shape0, typename SmemAccumulatorIterator0::Element,
typename SmemAccumulatorIterator0::TensorLayout,
typename SmemAccumulatorIterator0::Padding>;
struct B2bMmaSharedStorage {
typename Base::B2bMmaSharedStorage b2b_mma_shared_storage;
AccumulatorSharedStorage0 accumulator_shared_storage0;
};
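  // Illustrative usage sketch (hypothetical, not part of the original file). Given the
  // B2bMmaSharedStorage reference passed to the constructor below, a derived mainloop
  // could obtain a TensorRef to the intermediate accumulator tile via
  //
  //   auto accum_ref = shared_storage.accumulator_shared_storage0.accum_ref();
  //
  // (an AccumulatorSharedStorage0::TensorRefAccum) and hand it to the shared-memory
  // accumulator iterator that stages the first GEMM's results for the second GEMM.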
public:
/// Construct from tensor references
CUTLASS_DEVICE
B2bMmaBaseSmemAccumulator(
///< Shared storage needed for internal use by threadblock-scoped GEMM
B2bMmaSharedStorage &shared_storage,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx
):
Base(shared_storage.b2b_mma_shared_storage, thread_idx, warp_idx, lane_idx) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h",
"repo_id": "examples",
"token_count": 1950
} | 2 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to call a CUTLASS TRMM kernel and provides a naive reference
matrix multiply kernel to verify its correctness.
  The CUTLASS Trmm template is instantiated in the function CutlassStrmmNN. This kernel computes
the triangular matrix product (TRMM) using double-precision floating-point arithmetic and assumes
all matrices have column-major layout.
  The threadblock tile size is chosen as 64x64x16, which offers good performance for large matrices.
See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available
in CUTLASS.
https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/
Aside from defining and launching the STRMM kernel, this example does not use any other components
or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are
prevalent in the CUTLASS unit tests.
*/
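//
// Illustrative worked example (not part of the original file): with Left-Side, Lower,
// NonUnit TRMM and alpha = 1, a 2x2 lower-triangular A multiplied into a 2x2 B gives
//
//   A = | 1 0 |   B = | 5 6 |   C = alpha * (A * B) = |  5  6 |
//       | 2 3 |       | 7 8 |                         | 31 36 |
//
// i.e. each output element C(i,j) only accumulates A(i,k) * B(k,j) over k <= i, since
// the strictly upper-triangular part of A is treated as zero.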
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// Helper methods to check for errors
#include "helper.h"
//
// CUTLASS includes needed for double-precision TRMM kernel
//
// Defines cutlass::gemm::device::Trmm, the generic Trmm computation template class.
#include "cutlass/gemm/device/trmm.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS TRMM kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS TRMM template and launch a TRMM kernel.
cudaError_t CutlassStrmmNN(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
// Define type definition for double-precision CUTLASS TRMM with column-major
// input matrices and 64x64x16 threadblock tile size (chosen by default).
//
// To keep the interface manageable, several helpers are defined for plausible compositions
// including the following example for double-precision TRMM. Typical values are used as
// default template arguments.
//
// To view the full trmm device API interface, see `cutlass/gemm/device/trmm.h`
using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassTrmm = cutlass::gemm::device::Trmm<
double,
ColumnMajor,
cutlass::SideMode::kLeft,
cutlass::FillMode::kLower,
cutlass::DiagType::kNonUnit,
double,
ColumnMajor,
double,
ColumnMajor,
double,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<8, 8, 4>,
cutlass::epilogue::thread::LinearCombination<
double,
1,
double,
double,
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
5,
1,
1,
false,
cutlass::arch::OpMultiplyAdd
>;
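  // For reference, the template arguments above select double-precision Tensor Core
  // (OpClassTensorOp) math on SM80, a 64x64x16 threadblock tile, a 32x32x16 warp tile,
  // an 8x8x4 instruction shape, a 5-stage software pipeline, and an epilogue that only
  // applies alpha scaling.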
// Define a CUTLASS TRMM type
CutlassTrmm trmm_operator;
// Construct the CUTLASS TRMM arguments object.
//
// One of CUTLASS's design patterns is to define trmm argument objects that are constructible
// in host code and passed to kernels by value. These may include pointers, strides, scalars,
// and other arguments needed by Trmm and its components.
//
// The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible
// arguments to kernels and (2.) minimized initialization overhead on kernel entry.
//
CutlassTrmm::Arguments args(cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, M}, // Trmm Problem dimensions in Left-Side Mode
1, // batch_count,
{alpha}, // Scalars used in the Epilogue
reinterpret_cast<void const *>(A),
reinterpret_cast<void const *>(B),
reinterpret_cast<void *>(C), // destination matrix D (may be different memory than source C matrix)
(int64_t)M*M, // Batch strides
(int64_t)M*N,
(int64_t)M*N,
lda,
ldb,
ldc);
//
// Launch the CUTLASS TRMM kernel.
//
cutlass::Status status = trmm_operator(args);
//
// Return a cudaError_t if the CUTLASS TRMM operator returned an error code.
//
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
// Return success, if no errors were encountered.
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// The source code after this point in the file is generic CUDA using the CUDA Runtime API
// and simple CUDA kernels to initialize matrices and compute the general matrix product.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize a matrix with small integers.
__global__ void InitializeMatrix_kernel(
double *matrix,
int ldm,
int rows,
int columns,
int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < columns) {
if (fill_mode == cutlass::FillMode::kLower && i < j) return;
else if (fill_mode == cutlass::FillMode::kUpper && i > j) return;
int offset = i + j * ldm;
// Generate arbitrary elements.
int const k = 16807;
int const m = 16;
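    // With k = 16807 and m = 16, the expression below yields small integers in [-8, 7].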
double value = double(((offset + seed) * k % m) - m / 2);
matrix[offset] = value;
}
}
/// Simple function to initialize a matrix to arbitrary small integers.
cudaError_t InitializeMatrix(double *matrix, int ldm, int rows, int columns, int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
dim3 block(16, 16);
dim3 grid(
(rows + block.x - 1) / block.x,
(columns + block.y - 1) / block.y
);
InitializeMatrix_kernel<<< grid, block >>>(matrix, ldm, rows, columns, seed, fill_mode);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates device memory for a matrix then fills with arbitrary small integers.
cudaError_t AllocateMatrix(double **matrix, int ldm, int rows, int columns, int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
cudaError_t result;
size_t sizeof_matrix = sizeof(double) * ldm * columns;
// Allocate device memory.
result = cudaMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to allocate matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = cudaMemset(*matrix, 0, sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Initialize matrix elements to arbitrary small integers.
result = InitializeMatrix(*matrix, ldm, rows, columns, seed, fill_mode);
if (result != cudaSuccess) {
std::cerr << "Failed to initialize matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Naive reference TRMM computation.
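// Note: the loop below runs k over the full [0, M) range; correctness relies on the
// strictly upper-triangular part of A having been zero-filled by AllocateMatrix().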
__global__ void ReferenceTrmm_kernel(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < M && j < N) {
double accumulator = 0;
for (int k = 0; k < M; ++k) {
accumulator += A[i + k * lda] * B[k + j * ldb]; // Since A is in Left-Side Mode
}
C[i + j * ldc] = alpha * accumulator;
}
}
/// Reference TRMM computation.
cudaError_t ReferenceTrmm(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
dim3 block(16, 16);
dim3 grid(
(M + block.x - 1) / block.x,
(N + block.y - 1) / block.y
);
ReferenceTrmm_kernel<<< grid, block >>>(M, N, alpha, A, lda, B, ldb, C, ldc);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a double-precision
/// CUTLASS TRMM kernel.
cudaError_t TestCutlassTrmm(int M, int N, double alpha) {
cudaError_t result;
//
// Define several matrices to be used as operands to TRMM kernels.
//
// Compute leading dimensions for each matrix.
int lda = M;
int ldb = M;
int ldc = M;
// Compute size in bytes of the C matrix.
size_t sizeof_C = sizeof(double) * ldc * N;
// Define pointers to matrices in GPU device memory.
double *A;
double *B;
double *C_cutlass;
double *C_reference;
//
// Allocate matrices in GPU device memory with arbitrary seeds.
//
result = AllocateMatrix(&A, lda, M, M, 0, cutlass::FillMode::kLower);
if (result != cudaSuccess) {
return result;
}
result = AllocateMatrix(&B, ldb, M, N, 17);
if (result != cudaSuccess) {
cudaFree(A);
return result;
}
result = AllocateMatrix(&C_cutlass, ldc, M, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(B);
return result;
}
result = AllocateMatrix(&C_reference, ldc, M, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(B);
cudaFree(C_cutlass);
return result;
}
result = cudaMemcpy(C_reference, C_cutlass, sizeof_C, cudaMemcpyDeviceToDevice);
if (result != cudaSuccess) {
std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Launch CUTLASS TRMM.
//
result = CutlassStrmmNN(M, N, alpha, A, lda, B, ldb, C_cutlass, ldc);
if (result != cudaSuccess) {
std::cerr << "CUTLASS TRMM kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Verify.
//
// Launch reference TRMM
result = ReferenceTrmm(M, N, alpha, A, lda, B, ldb, C_reference, ldc);
if (result != cudaSuccess) {
std::cerr << "Reference TRMM kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
// Copy to host and verify equivalence.
std::vector<double> host_cutlass(ldc * N, 0);
std::vector<double> host_reference(ldc * N, 0);
result = cudaMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy CUTLASS TRMM results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
result = cudaMemcpy(host_reference.data(), C_reference, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy Reference TRMM results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Free device memory allocations.
//
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
//
// Test for bit equivalence of results.
//
if (host_cutlass != host_reference) {
std::cerr << "CUTLASS results incorrect." << std::endl;
return cudaErrorUnknown;
}
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to basic_trmm example.
//
// usage:
//
// 00_basic_trmm <M> <N> <alpha>
//
int main(int argc, const char *arg[]) {
bool notSupported = false;
// CUTLASS must be compiled with CUDA 11 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "This example requires compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
//
// Parse the command line to obtain TRMM dimensions and scalar values.
//
// TRMM problem dimensions.
int problem[2] = { 128, 128 };
for (int i = 1; i < argc && i < 3; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Scalars used for linear scaling the result of the matrix product.
double scalars[1] = { 1 };
for (int i = 3; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 3];
}
//
// Run the CUTLASS TRMM test.
//
cudaError_t result = TestCutlassTrmm(
problem[0], // TRMM M dimension
problem[1], // TRMM N dimension
scalars[0] // alpha
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| examples/32_basic_trmm/basic_trmm.cu/0 | {
"file_path": "examples/32_basic_trmm/basic_trmm.cu",
"repo_id": "examples",
"token_count": 5539
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief SYR2K Grouped Example.
This workload computes a batch of SYR2K operations with distinct problem sizes. This example closely
follows 24_gemm_grouped.
Examples:
# Runs a grouped SYR2K with 100 random problem sizes
$ ./examples/38_syr2k_grouped/38_syr2k_grouped --groups=100
# Runs a grouped SYR2K with 100 random problem sizes (with SYR2K-K dimension equal to 1024)
      $ ./examples/38_syr2k_grouped/38_syr2k_grouped --groups=100 --k=1024 --verbose=true
# Runs a grouped SYR2K that is equivalent to a batched SYR2K
$ ./examples/38_syr2k_grouped/38_syr2k_grouped --groups=100 --n=1024 --k=1024 --verbose=true
# Execute grouped SYR2K and profile with NSight
$ nv-nsight-cu-cli ./examples/38_syr2k_grouped/38_syr2k_grouped --n=256 --k=256 --verbose=true \
--iterations=1 --reference-check=false
*/
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <chrono>
#include <iostream>
#include <fstream>
#include <sstream>
#include <unordered_map>
#include <vector>
#include "cutlass/blas3.h"
#include "cutlass/cutlass.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/kernel/rank_2k_grouped.h"
#include "cutlass/gemm/kernel/default_rank_2k_grouped.h"
#include "cutlass/gemm/device/rank_2k_grouped.h"
#include "cutlass/gemm/device/rank_2k.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/rank_2k_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double initialization_time_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double initialization_time_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), initialization_time_ms(initialization_time_ms), gflops(gflops),
status(status), error(error), passed(true) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
bool error;
bool reference_check;
bool profile_initialization;
bool sort_problems;
std::vector<cutlass::gemm::GemmCoord> problem_sizes;
int alignment;
int problem_count;
int iterations;
int cuda_streams;
bool verbose;
float alpha;
float beta;
std::string benchmark_path;
std::string output_tag;
std::ofstream output_file;
using GroupScheduleMode = cutlass::gemm::kernel::GroupScheduleMode;
std::vector<GroupScheduleMode> scheduler_modes;
std::unordered_map<std::string, GroupScheduleMode>
str_to_scheduler_mode = {
{"kDeviceOnly", GroupScheduleMode::kDeviceOnly},
{"kHostPrecompute", GroupScheduleMode::kHostPrecompute}
};
struct GroupScheduleModeHash {
size_t operator()(GroupScheduleMode m) const {
return static_cast<size_t>(m);
}
};
std::unordered_map<GroupScheduleMode, std::string, GroupScheduleModeHash>
scheduler_mode_to_str = {
{GroupScheduleMode::kDeviceOnly, "kDeviceOnly"},
{GroupScheduleMode::kHostPrecompute, "kHostPrecompute"}
};
std::vector<GroupScheduleMode> all_scheduler_modes = {GroupScheduleMode::kDeviceOnly, GroupScheduleMode::kHostPrecompute};
//
// Methods
//
Options():
help(false),
error(false),
alignment(8),
reference_check(true),
profile_initialization(false),
sort_problems(false),
problem_count(5),
iterations(20),
cuda_streams(0),
verbose(false),
alpha(1),
beta(),
scheduler_modes({GroupScheduleMode::kDeviceOnly})
{ }
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("alignment", alignment, 8);
cmd.get_cmd_line_argument("groups", problem_count, 5);
cmd.get_cmd_line_argument("alpha", alpha, 1.0f);
cmd.get_cmd_line_argument("beta", beta, 0.0f);
cmd.get_cmd_line_argument("iterations", iterations, 20);
cmd.get_cmd_line_argument("streams", cuda_streams, 0);
cmd.get_cmd_line_argument("verbose", verbose, false);
cmd.get_cmd_line_argument("reference-check", reference_check, true);
cmd.get_cmd_line_argument("profile-initialization", profile_initialization, false);
cmd.get_cmd_line_argument("sort-problems", sort_problems, false);
cmd.get_cmd_line_argument("benchmark", benchmark_path);
std::vector<std::string> scheduler_mode_strs;
cmd.get_cmd_line_arguments("scheduler-modes", scheduler_mode_strs);
if (!scheduler_mode_strs.empty()) {
scheduler_modes.clear();
if (scheduler_mode_strs.size() == 1 && scheduler_mode_strs[0] == "all") {
scheduler_modes = all_scheduler_modes;
} else {
for (std::string precomp_str : scheduler_mode_strs) {
auto it = str_to_scheduler_mode.find(precomp_str);
if (it != str_to_scheduler_mode.end()) {
scheduler_modes.push_back(it->second);
} else if (precomp_str == "all") {
std::cerr << "Flag --scheduler-modes=all must not contain other scheduler modes in list." << std::endl;
error = true;
return;
} else {
std::cerr << "Unrecognized scheduler mode '" << precomp_str << "'" << std::endl;
error = true;
return;
}
}
}
}
std::string output_path;
cmd.get_cmd_line_argument("tag", output_tag);
cmd.get_cmd_line_argument("output_file", output_path);
if (!output_path.empty()) {
std::ios_base::openmode open_mode = std::ios_base::out;
std::ifstream input_file(output_path.c_str());
if (input_file.good()) {
open_mode = std::ios_base::app;
input_file.close();
}
output_file.open(output_path.c_str(), open_mode);
if (output_file.good() && open_mode != std::ios_base::app) {
output_file << "Tag,Provider,Kind,Groups,Runtime,GFLOPs\n";
}
}
// Decide how to initialize the problems
if (!benchmark_path.empty()) {
if (!benchmark_problems()) {
error = true;
problem_sizes.clear();
return;
}
}
else {
randomize_problems(cmd);
}
}
void randomize_problems(cutlass::CommandLine &cmd) {
//
// For now, randomly choose the problem sizes.
//
int cmd_line_m = -1;
int cmd_line_n = -1;
int cmd_line_k = -1;
cmd.get_cmd_line_argument("m", cmd_line_m);
cmd.get_cmd_line_argument("n", cmd_line_n);
cmd.get_cmd_line_argument("k", cmd_line_k);
// SYR2K is defined via only N and K.
if (cmd_line_m != -1) {
std::cerr << "Parameter M is ignored for SYR2K\n";
error = true;
return;
}
problem_sizes.reserve(problem_count);
for (int i = 0; i < problem_count; ++i) {
int n = cmd_line_n;
int k = cmd_line_k;
if (n < 1) {
n = alignment * ((rand() % 256) + 1);
}
if (k < 1) {
k = alignment * ((rand() % 256) + 1);
}
// SYR2K is defined only in terms of N and K. Replicate N into
// the SYR2K-N dimension.
cutlass::gemm::GemmCoord problem(n, n, k);
problem_sizes.push_back(problem);
}
}
/// Load a benchmark
bool benchmark_problems() {
std::ifstream file(benchmark_path);
if (!file.good()) {
return false;
}
while (file.good()) {
int idx = -1;
std::string extent_str;
file >> idx >> extent_str;
if (idx < 0 || extent_str.empty()) {
break;
}
cutlass::gemm::GemmCoord extent;
std::vector<std::string> tokens;
cutlass::CommandLine::tokenize(tokens, extent_str, 'x');
for (int i = 0; i < int(tokens.size()); ++i) {
int x = std::atoi(tokens.at(i).c_str());
// round up
if (x % alignment) {
x += (alignment - (x % alignment));
}
extent.at(i) = x;
}
if (extent.product()) {
problem_sizes.push_back(extent);
}
}
problem_count = int(problem_sizes.size());
return true;
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "38_syr2k_grouped\n\n"
<< " This example profiles the performance of a 'grouped' SYR2K kernel. This example closely follows 24_gemm_grouped\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --benchmark=<str> Executes a benchmark problem size.\n"
<< " --output_file=<str> Path to a CSV file to output results. If it exists already, results are appended.\n"
<< " --tag=<str> String tag to prepend to the CSV file.\n"
<< " --groups=<int> Number of individual SYR2K problems (default: --groups=15)\n"
<< " --m=<int> Sets the M dimension for all groups. Otherwise, it is selected randomly\n"
<< " --n=<int> Sets the N dimension for all groups. Otherwise, it is selected randomly\n"
<< " --k=<int> Sets the K dimension for all groups. Otherwise, it is selected randomly\n"
<< " --alpha=<f32> Epilogue scalar alpha (real part)\n"
<< " --beta=<f32> Epilogue scalar beta (real part)\n"
<< " --scheduler-modes=<str> List of scheduler modes to be profile for grouped GEMM scheduler (default: --scheduler_modes=kDeviceOnly)\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --reference-check=<bool> If true, performs reference check.\n"
<< " --verbose=<bool> If true, prints problem sizes and batching structure.\n"
<< " --profile-initialization=<bool> If true, profiles the device-level kernel's initialization.\n"
<< " --sort-problems=<bool> If true, sorts problem sizes in descending order of SYR2K-K dimension.\n";
out << "\n\nExamples:\n\n"
<< "# Runs a grouped SYR2K with 100 random problem sizes\n"
<< "$ ./examples/38_syr2k_grouped/38_syr2k_grouped --groups=100\n\n"
<< "# Runs a grouped SYR2K with 100 random problem sizes (with K dimension equal to 1024)\n"
<< "$ ./examples/38_syr2k_grouped/38_syr2k_grouped --groups=100 --k=1024 --verbose=true\n\n"
<< "# Runs a grouped SYR2K that is equivalent to a batched SYR2K\n"
<< "$ ./examples/38_syr2k_grouped/38_syr2k_grouped --groups=100 --n=1024 --k=1024 --verbose=true\n\n"
<< "# Runs a grouped SYR2K with each different scheduler mode\n"
<< "$ ./examples/38_syr2k_grouped/38_syr2k_grouped --scheduler-modes=all\n\n"
<< "# Runs a grouped SYR2K with each different scheduler mode and profiles host-side initialization time\n"
<< "$ ./examples/38_syr2k_grouped/38_syr2k_grouped --scheduler-modes=all --profile-initialization=true\n\n"
<< "# Runs a grouped SYR2K problem given an externally supplied benchmark file. This is a text file in which\n"
<< "# Each line contains a unique group index and an MxNxK triple indicating problemsize. NOTE that the\n"
<< "# GEMM-M and GEMM-N dimensions must match.\n"
<< "#\n"
<< "# For example, assume the following are the contents of 'problems.txt'\n"
<< "#\n"
<< "# 0 256x256x520\n"
<< "# 1 264x264x1024\n"
<< "# 2 48x48x1024\n"
<< "#\n"
<< "$ ./examples/38_syr2k_grouped/38_syr2k_grouped --benchmark=problems.txt\n\n"
<< "# Execute Grouped SYR2K and profile with NSight\n"
<< "$ nv-nsight-cu-cli ./examples/38_syr2k_grouped/38_syr2k_grouped --n=256 --k=256 --verbose=true --iterations=1 --reference-check=false\n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = int64_t();
for (auto const & problem : problem_sizes) {
fmas += problem.product();
}
// SYR2K is defined as (A x BT) + (B x AT), so the number of FMAs is twice that in a GEMM
fmas *= 2;
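    // Illustrative arithmetic only: a single group with n = k = 1024 contributes
    // 2 * 1024^3 ~= 2.15e9 FMAs, i.e. roughly 4.3 GFLOP for that group.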
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Rank2K>
class BaseTestbed {
public:
//
// Type definitions
//
using ElementA = typename Rank2K::ElementA;
using ElementB = typename Rank2K::ElementB;
using ElementC = typename Rank2K::ElementC;
using ElementAccumulator = typename Rank2K::ElementAccumulator;
using EpilogueOutputOp = typename Rank2K::Rank2Kkernel::Epilogue::OutputOp;
using ElementCompute = typename EpilogueOutputOp::ElementCompute;
using LayoutA = typename Rank2K::LayoutA;
using LayoutB = typename Rank2K::LayoutB;
using LayoutC = typename Rank2K::LayoutC;
using MatrixCoord = typename LayoutC::TensorCoord;
//
// Data members
//
Options & options;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint32_t seed;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device;
std::vector<int64_t> offset_A;
std::vector<int64_t> offset_B;
std::vector<int64_t> offset_C;
std::vector<int64_t> offset_D;
std::vector<int64_t> lda_host;
std::vector<int64_t> ldb_host;
std::vector<int64_t> ldc_host;
std::vector<int64_t> ldd_host;
cutlass::DeviceAllocation<int64_t> lda;
cutlass::DeviceAllocation<int64_t> ldb;
cutlass::DeviceAllocation<int64_t> ldc;
cutlass::DeviceAllocation<int64_t> ldd;
cutlass::DeviceAllocation<ElementA> block_A;
cutlass::DeviceAllocation<ElementB> block_B;
cutlass::DeviceAllocation<ElementC> block_C;
cutlass::DeviceAllocation<ElementC> block_D;
cutlass::DeviceAllocation<ElementA *> ptr_A;
cutlass::DeviceAllocation<ElementB *> ptr_B;
cutlass::DeviceAllocation<ElementC *> ptr_C;
cutlass::DeviceAllocation<ElementC *> ptr_D;
BaseTestbed(
Options &options_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
):
options(options_), init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
int problem_count() const {
return options.problem_count;
}
/// Helper to initialize a tensor view
template <typename Element>
void initialize_tensor(
Element *ptr,
size_t capacity,
cutlass::Distribution::Kind dist_kind,
uint32_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) {
scope_max = 5;
scope_min = -5;
}
else {
scope_max = 8;
scope_min = -8;
}
} else {
scope_max = 8;
scope_min = -8;
}
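      // The narrow value ranges above presumably keep products and partial sums exactly
      // representable, so the bit-exact reference comparison in verify() can succeed.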
cutlass::reference::device::BlockFillRandomUniform(
ptr, capacity, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::device::BlockFillRandomGaussian(
ptr, capacity, seed, Element(), Element(0.5f));
}
else if (dist_kind == cutlass::Distribution::Sequential) {
// Fill with increasing elements
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(1), Element());
}
else {
// Fill with all 1s
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(), Element(1));
}
}
/// Allocates device-side data
void allocate() {
int64_t total_elements_A = 0;
int64_t total_elements_B = 0;
int64_t total_elements_C = 0;
int64_t total_elements_D = 0;
lda_host.resize(problem_count());
ldb_host.resize(problem_count());
ldc_host.resize(problem_count());
ldd_host.resize(problem_count());
for (int32_t i = 0; i < problem_count(); ++i) {
auto problem = options.problem_sizes.at(i);
lda_host.at(i) = LayoutA::packed({problem.n(), problem.k()}).stride(0);
ldb_host.at(i) = LayoutB::packed({problem.n(), problem.k()}).stride(0);
ldc_host.at(i) = LayoutC::packed({problem.n(), problem.n()}).stride(0);
ldd_host.at(i) = LayoutC::packed({problem.n(), problem.n()}).stride(0);
offset_A.push_back(total_elements_A);
offset_B.push_back(total_elements_B);
offset_C.push_back(total_elements_C);
offset_D.push_back(total_elements_D);
int64_t elements_A = problem.n() * problem.k();
int64_t elements_B = problem.n() * problem.k();
int64_t elements_C = problem.n() * problem.n();
int64_t elements_D = problem.n() * problem.n();
total_elements_A += elements_A;
total_elements_B += elements_B;
total_elements_C += elements_C;
total_elements_D += elements_D;
}
lda.reset(problem_count());
ldb.reset(problem_count());
ldc.reset(problem_count());
ldd.reset(problem_count());
block_A.reset(total_elements_A);
block_B.reset(total_elements_B);
block_C.reset(total_elements_C);
block_D.reset(total_elements_D);
}
/// Initializes device-side data
void initialize() {
problem_sizes_device.reset(problem_count());
problem_sizes_device.copy_from_host(options.problem_sizes.data());
lda.copy_from_host(lda_host.data());
ldb.copy_from_host(ldb_host.data());
ldc.copy_from_host(ldc_host.data());
ldd.copy_from_host(ldd_host.data());
//
// Assign pointers
//
std::vector<ElementA *> ptr_A_host(problem_count());
std::vector<ElementB *> ptr_B_host(problem_count());
std::vector<ElementC *> ptr_C_host(problem_count());
std::vector<ElementC *> ptr_D_host(problem_count());
for (int32_t i = 0; i < problem_count(); ++i) {
ptr_A_host.at(i) = block_A.get() + offset_A.at(i);
ptr_B_host.at(i) = block_B.get() + offset_B.at(i);
ptr_C_host.at(i) = block_C.get() + offset_C.at(i);
ptr_D_host.at(i) = block_D.get() + offset_D.at(i);
}
ptr_A.reset(problem_count());
ptr_A.copy_from_host(ptr_A_host.data());
ptr_B.reset(problem_count());
ptr_B.copy_from_host(ptr_B_host.data());
ptr_C.reset(problem_count());
ptr_C.copy_from_host(ptr_C_host.data());
ptr_D.reset(problem_count());
ptr_D.copy_from_host(ptr_D_host.data());
//
// Initialize the problems of the workspace
//
initialize_tensor(block_A.get(), block_A.size(), init_A, seed * 2021);
initialize_tensor(block_B.get(), block_B.size(), init_B, seed * 2022);
initialize_tensor(block_C.get(), block_C.size(), init_C, seed * 2023);
cutlass::reference::device::BlockFillSequential(
block_D.get(), block_D.size(), ElementC(), ElementC());
}
/// Verifies the result is a SYR2K
bool verify() {
bool passed = true;
for (int32_t i = 0; i < problem_count(); ++i) {
cutlass::gemm::GemmCoord problem = options.problem_sizes.at(i);
LayoutA layout_A(lda_host.at(i));
LayoutB layout_B(ldb_host.at(i));
LayoutC layout_C(ldc_host.at(i));
LayoutC layout_D(ldd_host.at(i));
cutlass::HostTensor<ElementA, LayoutA> host_A(
typename LayoutA::TensorCoord(problem.n(), problem.k()), /*device_backed=*/false);
cutlass::HostTensor<ElementB, LayoutB> host_B(
typename LayoutB::TensorCoord(problem.n(), problem.k()), /*device_backed=*/false);
cutlass::HostTensor<ElementC, LayoutC> host_C(
typename LayoutC::TensorCoord(problem.n(), problem.n()), /*device_backed=*/false);
cutlass::HostTensor<ElementC, LayoutC> host_D(
typename LayoutC::TensorCoord(problem.n(), problem.n()), /*device_backed=*/false);
cutlass::device_memory::copy_to_host(host_A.host_data(), block_A.get() + offset_A.at(i), problem.n() * problem.k());
cutlass::device_memory::copy_to_host(host_B.host_data(), block_B.get() + offset_B.at(i), problem.n() * problem.k());
cutlass::device_memory::copy_to_host(host_C.host_data(), block_C.get() + offset_C.at(i), problem.n() * problem.n());
cutlass::reference::host::BlockFillSequential(
host_D.host_data(), problem.n() * problem.n(), ElementC(), ElementC());
MatrixCoord extent_C{problem.n(), problem.n()};
// Reference Rank2K
cutlass::reference::host::Rank2KComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementC, ElementAccumulator
>(
problem,
(double)options.alpha,
host_A.host_view(),
Rank2K::kTransformA,
host_B.host_view(),
Rank2K::kTransformB,
(double)options.beta,
host_C.host_view(),
host_D.host_view(),
ElementAccumulator(0),
Rank2K::kFillModeC,
Rank2K::kBlasMode
);
// Copy to host memory
std::vector<ElementC> matrix_D(layout_D.capacity(extent_C));
cutlass::device_memory::copy_to_host(matrix_D.data(), block_D.get() + offset_D.at(i), matrix_D.size());
cutlass::TensorView<ElementC, LayoutC> view_D(matrix_D.data(), layout_D, extent_C);
cutlass::TensorView<ElementC, LayoutC> view_Ref = host_D.host_view();
// Reference check
passed = cutlass::reference::host::TensorEquals(view_D, view_Ref);
if (!passed) {
std::cerr << "\n***\nError - problem " << i << " failed the QA check\n***\n" << std::endl;
return passed;
}
}
return passed;
}
};
template <typename Rank2K>
class TestbedConventional : BaseTestbed<Rank2K> {
public:
TestbedConventional(
Options &options_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
): BaseTestbed<Rank2K>(options_, init_A_, init_B_, init_C_, seed_) {}
/// Verbose printing of problem sizes
void print_problem_sizes() {
// Print groups
std::cout << this->problem_count() << " groups:\n";
int32_t idx = 0;
int64_t total_tiles = 0;
for (auto const & problem : this->options.problem_sizes) {
int tiles =
((problem.m() + Rank2K::ThreadblockShape::kM - 1) / Rank2K::ThreadblockShape::kM) *
((problem.n() + Rank2K::ThreadblockShape::kN - 1) / Rank2K::ThreadblockShape::kN);
total_tiles += tiles;
std::cout << " [" << idx << "]: "
<< problem.m() << "-by-" << problem.n() << "-by-" << problem.k()
<< " (" << tiles << " threadblock tiles)" << "\n";
++idx;
}
std::cout << std::endl;
}
/// Executes a conventional SYR2K kernel.
Result profile() {
std::cout << "Conventional Rank2K:\n"
<< "====================================================" << std::endl;
Result result;
result.passed = false;
// Initialize the problem
this->allocate();
this->initialize();
if (this->options.verbose) {
print_problem_sizes();
}
//
// Create CUDA streams to maximize concurrency of SYR2K kernels
//
int32_t effective_streams = (this->options.cuda_streams ? this->options.cuda_streams : 1);
std::vector<cudaStream_t> cuda_streams;
char const *provider = "CUTLASS";
//
// Warmup run
//
if (this->options.cuda_streams) {
for (int i = 0; i < this->options.cuda_streams; ++i) {
cudaStream_t stream;
result.error = cudaStreamCreate(&stream);
if (result.error != cudaSuccess) {
std::cerr << "Failed to create CUDA stream." << std::endl;
return result;
}
cuda_streams.push_back(stream);
}
}
else {
cuda_streams.push_back(nullptr);
}
// Use 'D' for the in/out workspace
this->block_D.copy_from_device(this->block_C.get());
for (size_t i = 0; i < this->options.problem_sizes.size(); ++i) {
cutlass::gemm::GemmCoord const & problem = this->options.problem_sizes[i];
int32_t batch_count = 1;
int64_t lda = this->lda_host.at(i);
int64_t ldb = this->ldb_host.at(i);
int64_t ldc = this->ldc_host.at(i);
typename Rank2K::ElementA* ptrA = this->block_A.get() + this->offset_A.at(i);
typename Rank2K::ElementB* ptrB = this->block_B.get() + this->offset_B.at(i);
typename Rank2K::ElementC* ptrC = this->block_C.get() + this->offset_C.at(i);
typename Rank2K::ElementC* ptrD = this->block_D.get() + this->offset_D.at(i);
//
// Initialize the CUTLASS SYR2K operator
//
// Configure the SYR2K arguments
typename Rank2K::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta);
typename Rank2K::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kGemm,
problem,
batch_count,
epilogue_op,
(void const *)ptrA,
(void const *)ptrB,
(void const *)ptrC,
(void *)ptrD,
int64_t(),
int64_t(),
int64_t(),
int64_t(),
int64_t(lda),
int64_t(ldb),
int64_t(ldc),
int64_t(ldc)
};
Rank2K rank2k_op;
cutlass::Status status = rank2k_op.initialize(arguments);
if (status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS error on line " << __LINE__ << std::endl;
return result;
}
status = rank2k_op();
if (status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS error on line " << __LINE__ << std::endl;
return result;
}
}
//
// Wait for completion
//
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        return result;
}
}
//
// Wait for completion
//
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
// Record an event at the start of a series of SYR2K operations
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
size_t last_stream_idx = 0;
for (int iter = 0; iter < this->options.iterations; ++iter) {
for (size_t i = 0; i < this->options.problem_sizes.size(); ++i) {
cutlass::gemm::GemmCoord const & problem = this->options.problem_sizes[i];
int32_t batch_count = 1;
int64_t lda = this->lda_host.at(i);
int64_t ldb = this->ldb_host.at(i);
int64_t ldc = this->ldc_host.at(i);
typename Rank2K::ElementA* ptrA = this->block_A.get() + this->offset_A.at(i);
typename Rank2K::ElementB* ptrB = this->block_B.get() + this->offset_B.at(i);
typename Rank2K::ElementC* ptrC = this->block_C.get() + this->offset_C.at(i);
typename Rank2K::ElementC* ptrD = this->block_D.get() + this->offset_D.at(i);
last_stream_idx = (i % effective_streams);
//
// Initialize the CUTLASS SYR2K operator
//
// Configure the SYR2K arguments
typename Rank2K::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta);
typename Rank2K::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kGemm,
problem,
batch_count,
epilogue_op,
(void const *)ptrA,
(void const *)ptrB,
(void const *)ptrC,
(void *)ptrD,
int64_t(),
int64_t(),
int64_t(),
int64_t(),
int64_t(lda),
int64_t(ldb),
int64_t(ldc),
int64_t(ldc)
};
Rank2K rank2k_op;
cutlass::Status status = rank2k_op.initialize(arguments);
if (status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS error on line " << __LINE__ << std::endl;
return result;
}
status = rank2k_op(cuda_streams[last_stream_idx]);
if (status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS error on line " << __LINE__ << std::endl;
return result;
}
}
}
//
// Stop profiling loop
//
// Record an event when the SYR2K operations have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Wait for work to be completed
//
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(this->options.iterations);
result.gflops = this->options.gflops(result.runtime_ms / 1000.0);
//
// Cleanup
//
for (auto event : events) {
(void)cudaEventDestroy(event);
}
for (auto stream : cuda_streams) {
if (stream) {
(void)cudaStreamDestroy(stream);
}
}
std::cout << " " << this->options.problem_sizes.size() << " conventional Rank2Ks launched" << std::endl;
std::cout << std::endl;
std::cout << " " << "Conventional Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " " << "Conventional GFLOPS: " << result.gflops << std::endl;
if (this->options.output_file.good()) {
this->options.output_file << this->options.output_tag << "," << provider << ",conventional,"
<< this->problem_count() << "," << result.runtime_ms << "," << result.gflops << std::endl;
}
result.passed = true;
return result;
}
};
template <typename Rank2K_, cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode_>
class TestbedGrouped : BaseTestbed<Rank2K_> {
public:
TestbedGrouped(
Options &options_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
) : BaseTestbed<Rank2K_>(options_, init_A_, init_B_, init_C_, seed_) {}
// Redefine Rank2K with different GroupScheduleMode_
using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped<
typename Rank2K_::ElementA, typename Rank2K_::LayoutA, Rank2K_::kTransformA, Rank2K_::kAlignmentA,
typename Rank2K_::ElementB, typename Rank2K_::LayoutB, Rank2K_::kTransformB, Rank2K_::kAlignmentB,
typename Rank2K_::ElementC, typename Rank2K_::LayoutC, Rank2K_::kFillModeC,
typename Rank2K_::ElementAccumulator,
typename Rank2K_::OperatorClass,
typename Rank2K_::ArchTag,
typename Rank2K_::ThreadblockShape,
typename Rank2K_::WarpShape,
typename Rank2K_::InstructionShape,
typename Rank2K_::EpilogueOutputOp,
typename Rank2K_::ThreadblockSwizzle,
Rank2K_::kStages,
typename Rank2K_::Operator::ArchMmaOperator::Operator,
Rank2K_::kBlasMode,
GroupScheduleMode_>::Rank2Kkernel;
using Rank2K = cutlass::gemm::device::Rank2KGrouped<Rank2Kkernel>;
/// Verbose printing of problem sizes
void print_problem_sizes() {
// Print groups
std::cout << this->problem_count() << " groups:\n";
int32_t idx = 0;
int64_t total_tiles = 0;
for (auto const & problem : this->options.problem_sizes) {
int tiles = Rank2K::problem_tile_count(problem);
total_tiles += tiles;
std::cout << " [" << idx << "]: "
<< problem.m() << "-by-" << problem.n() << "-by-" << problem.k()
<< " (" << tiles << " threadblock tiles)" << "\n";
++idx;
}
std::cout << std::endl;
}
/// Sort problems in descending order of problem-K dimension
void sort_problems() {
Rank2K::sort_problems(this->options.problem_count,
this->options.problem_sizes.data(),
this->lda_host.data(),
this->ldb_host.data(),
this->ldc_host.data(),
this->ldd_host.data(),
this->offset_A.data(),
this->offset_B.data(),
this->offset_C.data(),
this->offset_D.data());
}
/// Executes a grouped kernel and measures runtime.
Result profile() {
std::string sched_mode = this->options.scheduler_mode_to_str.find(GroupScheduleMode_)->second;
std::cout << std::endl;
std::cout << "Grouped Rank2K (CUTLASS) with mode " << sched_mode << ":\n"
<< "====================================================" << std::endl;
Result result;
int threadblock_count = Rank2K::sufficient(this->options.problem_sizes.data(), this->options.problem_count);
// Early exit
if (!threadblock_count) {
std::cout << "Active CUDA device lacks hardware resources to run CUTLASS Grouped SYR2K kernel." << std::endl;
return result;
}
result.passed = false;
// Initialize the problem
this->allocate();
if (this->options.sort_problems) {
sort_problems();
}
this->initialize();
if (this->options.verbose) {
print_problem_sizes();
}
// Configure the Rank2K arguments
typename Rank2K::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta);
// Configure Rank2K arguments
typename Rank2K::Arguments args(
cutlass::gemm::GemmUniversalMode::kGemm,
this->problem_sizes_device.get(),
this->problem_count(),
threadblock_count,
epilogue_op,
this->ptr_A.get(),
this->ptr_B.get(),
this->ptr_C.get(),
this->ptr_D.get(),
this->lda.get(),
this->ldb.get(),
this->ldc.get(),
this->ldd.get(),
this->options.problem_sizes.data()
);
// Initialize the Rank2K object
Rank2K rank2k{};
size_t workspace_size = rank2k.get_workspace_size(args);
cutlass::DeviceAllocation<uint8_t> workspace(workspace_size);
result.status = rank2k.initialize(args, workspace.get());
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to initialize CUTLASS Grouped Rank2K kernel." << std::endl;
return result;
}
// Run the grouped Rank2K object
result.status = rank2k.run();
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS Grouped Rank2K kernel." << std::endl;
return result;
}
// Wait for completion
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
//
// Verify correctness
//
result.passed = true;
if (this->options.reference_check) {
result.passed = this->verify();
}
//
// Warm-up run of the grouped Rank2K object
//
result.status = rank2k.run();
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS Grouped Rank2K kernel." << std::endl;
return result;
}
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        return result;
}
}
// Record an event at the start of a series of SYR2K operations
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < this->options.iterations; ++iter) {
rank2k();
}
//
// Stop profiling loop
//
// Record an event when the Rank2K operations have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(this->options.iterations);
result.gflops = this->options.gflops(result.runtime_ms / 1000.0);
//
// Cleanup
//
for (auto event : events) {
(void)cudaEventDestroy(event);
}
// Optionally profile initialization
if (this->options.profile_initialization) {
// Warm up
rank2k.initialize(args, workspace.get());
auto start_time = std::chrono::high_resolution_clock::now();
for (int32_t i = 0; i < this->options.iterations; ++i) {
rank2k.initialize(args, workspace.get());
}
auto end_time = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> duration = end_time - start_time;
duration /= double(this->options.iterations);
result.initialization_time_ms = duration.count();
}
int64_t total_tiles = Rank2K::group_tile_count(args);
std::cout << " " << total_tiles << " total threadblock tiles." << std::endl;
std::cout << std::endl;
std::cout << " " << "Grouped Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " " << "Grouped GFLOPs: " << result.gflops << std::endl;
if (this->options.profile_initialization) {
std::cout << " " << "Init Runtime: " << result.initialization_time_ms << " ms" << std::endl;
}
if (this->options.output_file.good()) {
this->options.output_file << this->options.output_tag << ",CUTLASS,grouped-" << sched_mode << ","
<< this->problem_count() << "," << result.runtime_ms << "," << result.gflops << std::endl;
}
std::cout << "\nPassed\n";
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) {
//
// This example requires an NVIDIA Ampere-architecture GPU.
//
std::cout
<< "CUTLASS's Grouped Rank2K example requires a GPU of NVIDIA's Ampere Architecture or "
<< "later (compute capability 80 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.error) {
std::cerr << "Aborting execution." << std::endl;
return -1;
}
//
// Define the Grouped and Conventional Rank2K types
//
using ElementA = double;
using ElementB = double;
using ElementOutput = double;
using ElementAccumulator = double;
const cutlass::FillMode kFillModeC = cutlass::FillMode::kLower;
const int kAlignmentA = 1;
const int kAlignmentB = 1;
const cutlass::ComplexTransform kTransformA = cutlass::ComplexTransform::kNone;
const cutlass::ComplexTransform kTransformB = cutlass::ComplexTransform::kNone;
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
using OperatorClass = cutlass::arch::OpClassTensorOp;
using ArchTag = cutlass::arch::Sm80;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, 1,
ElementAccumulator, ElementAccumulator>;
// NOTE: Threadblock swizzling is currently not supported by CUTLASS's grouped kernels.
// This parameter is passed in at present to match the APIs of other kernels. The parameter
// is unused within the kernel.
using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
const int kStages = 4;
const bool kSplitKSerial = false;
using Operator = cutlass::arch::OpMultiplyAdd;
const cutlass::BlasMode kBlasMode = cutlass::BlasMode::kSymmetric;
// Define a grouped Rank2K kernel with all template parameters set except
// for scheduling mode. This will be used as the template for all scheduling
// modes executed.
using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped<
ElementA, LayoutA, kTransformA, kAlignmentA,
ElementB, LayoutB, kTransformB, kAlignmentB,
ElementOutput, LayoutC, kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
Operator,
kBlasMode>::Rank2Kkernel;
using Rank2KGrouped = cutlass::gemm::device::Rank2KGrouped<Rank2Kkernel>;
// Rank2k operator
using Rank2KConventional = cutlass::gemm::device::Rank2K<
ElementA, LayoutA,
ElementB, LayoutB,
ElementOutput, LayoutC, kFillModeC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kAlignmentA,
kAlignmentB,
kSplitKSerial,
Operator,
kTransformA,
kTransformB,
kBlasMode
>;
//
// Profile it
//
TestbedConventional<Rank2KConventional> testbed(options);
Result result = testbed.profile();
if (!result.passed) {
std::cout << "Profiling CUTLASS conventional Rank2K has failed.\n";
std::cout << "\nFailed\n";
return -1;
}
using GroupScheduleMode = cutlass::gemm::kernel::GroupScheduleMode;
for (GroupScheduleMode mode : options.scheduler_modes) {
Result result;
switch (mode) {
case GroupScheduleMode::kDeviceOnly:
{
TestbedGrouped<Rank2KGrouped, GroupScheduleMode::kDeviceOnly> runner(options);
result = runner.profile();
break;
}
case GroupScheduleMode::kHostPrecompute:
{
TestbedGrouped<Rank2KGrouped, GroupScheduleMode::kHostPrecompute> runner(options);
result = runner.profile();
break;
}
}
if (result.error != cudaSuccess) {
return 1;
}
// Override verbose flag to avoid printing duplicate information for each scheduling mode
options.verbose = false;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/38_syr2k_grouped/syr2k_grouped.cu/0 | {
"file_path": "examples/38_syr2k_grouped/syr2k_grouped.cu",
"repo_id": "examples",
"token_count": 19343
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts. Partial
specializations here choose 'device::GemmTransposed' to implement this functionality.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "fmha_grouped.h"
#include "gemm_kernel_utils.h"
#include "gemm/custom_mma.h"
#include "gemm/find_default_mma.h"
#include "gemm/mma_from_smem.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
// The datatype of Q/K/V
typename scalar_t_,
// Architecture we are targeting (eg `cutlass::arch::Sm80`)
typename ArchTag_,
// If Q/K/V are correctly aligned in memory and we can run a fast kernel
bool isAligned_,
int kQueriesPerBlock,
int kKeysPerBlock,
int kMaxK = (int)cutlass::platform::numeric_limits<uint32_t>::max(),
GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly
>
struct DefaultFMHAGrouped {
using scalar_t = scalar_t_;
using accum_t = float;
using output_t = scalar_t;
// Accumulator between 2 iterations
// Using `accum_t` improves perf on f16 at the cost of
// numerical errors
using output_accum_t = accum_t;
using ArchTag = ArchTag_;
static bool const kIsAligned = isAligned_;
static bool const kSingleValueIteration = kMaxK <= kKeysPerBlock;
static constexpr bool kIsHalf = cutlass::sizeof_bits<scalar_t>::value == 16;
static int const kWarpSize = 32;
static int const kNumWarpsPerBlock = kQueriesPerBlock * kKeysPerBlock / (kWarpSize * kWarpSize);
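  // For example (hypothetical values): with kQueriesPerBlock = 64,
  // kKeysPerBlock = 64, and kWarpSize = 32, this yields
  // 64 * 64 / (32 * 32) = 4 warps per threadblock, and kSingleValueIteration
  // is true whenever kMaxK <= 64.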
struct MM0 {
/*
In this first matmul, we compute a block of `Q @ K.T`.
While the calculation result is still hot in registers, we update
`mi`, `m_prime`, `s_prime` in shared-memory, and then store this value
into a shared-memory ("AccumulatorSharedStorage") that is used later as
operand A for the second matmul (see MM1)
*/
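    // Rough sketch of the running-softmax bookkeeping these buffers support
    // (names assumed; the exact update is implemented elsewhere in the kernel):
    //   S       = Q @ K.T                          (this block's scores)
    //   mi      = max(m_prime, rowmax(S))          (new running row-wise max)
    //   P       = exp(S - mi)                      (rescaled scores, to smem)
    //   s_prime = s_prime * exp(m_prime - mi) + rowsum(P)
    // P then becomes operand A of the second matmul (see MM1).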
using GemmType = gemm_kernel_utils::DefaultGemmType<ArchTag, scalar_t>;
using OpClass = typename GemmType::OpClass;
using ElementA = scalar_t;
using ElementB = scalar_t;
using ElementC = scalar_t;
using ElementAccumulator = accum_t;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::RowMajor;
using DefaultConfig =
typename cutlass::gemm::device::DefaultGemmConfiguration<
OpClass,
ArchTag,
ElementA,
ElementB,
ElementC,
ElementAccumulator
>;
static int const kAlignmentA =
kIsAligned ? DefaultConfig::kAlignmentA : GemmType::kMinimumAlignment;
static int const kAlignmentB =
kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment;
using ThreadblockShape = cutlass::gemm::GemmShape<kQueriesPerBlock, kKeysPerBlock, GemmType::ThreadK>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
using InstructionShape = typename GemmType::InstructionShape;
static int const kStages = DefaultConfig::kStages;
using Operator = typename GemmType::Operator;
using DefaultMma = typename cutlass::gemm::threadblock::FindDefaultMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
LayoutC,
OpClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
ArchTag::kMinComputeCapability >= 80 && kIsHalf
? 4
: DefaultConfig::kStages,
Operator
>::DefaultMma;
using MmaCore = typename DefaultMma::MmaCore;
using IteratorA = typename DefaultMma::IteratorA;
using IteratorB = typename DefaultMma::IteratorB;
using DefaultThreadblockMma = typename DefaultMma::ThreadblockMma;
using Mma = typename cutlass::platform::conditional<
kSingleValueIteration,
typename MakeCustomMma<DefaultThreadblockMma, kMaxK>::Mma,
DefaultThreadblockMma>::type;
using AccumLambdaIterator = typename DefaultMmaAccumLambdaIterator<
typename Mma::Operator::IteratorC,
ElementAccumulator,
kWarpSize>::Iterator;
static_assert(MmaCore::WarpCount::kCount == kNumWarpsPerBlock, "");
// Epilogue to store to shared-memory in a format that we can use later for
// the second matmul
using B2bGemm = typename cutlass::gemm::threadblock::B2bGemm<
typename Mma::Operator::IteratorC,
typename Mma::Operator,
scalar_t,
WarpShape,
ThreadblockShape>;
using AccumulatorSharedStorage = typename B2bGemm::AccumulatorSharedStorage;
};
struct MM1 {
/*
Second matmul: perform `attn @ V` where `attn` is the attention (not
normalized) and stored in shared memory
*/
using GemmType = typename MM0::GemmType;
using OpClass = typename GemmType::OpClass;
using ElementA = scalar_t;
using ElementB = scalar_t;
using ElementC = output_accum_t;
using ElementAccumulator = accum_t;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::RowMajor;
using LayoutC = cutlass::layout::RowMajor;
using DefaultConfig =
typename cutlass::gemm::device::DefaultGemmConfiguration<
OpClass,
ArchTag,
ElementA,
ElementB,
ElementC,
ElementAccumulator
>;
static int const kAlignmentA = DefaultConfig::kAlignmentA;
static int const kAlignmentB =
kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment;
using ThreadblockShape = typename MM0::ThreadblockShape;
using WarpShape = typename MM0::WarpShape;
using InstructionShape = typename MM0::InstructionShape;
using EpilogueOutputOp = typename DefaultConfig::EpilogueOutputOp;
static int const kStages = DefaultConfig::kStages;
using Operator = typename GemmType::Operator;
using ThreadblockSwizzle = void; // Swizzling is unused
static bool const kSplitKSerial = false;
using DefaultGemm = cutlass::gemm::kernel::DefaultGemm<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OpClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
ArchTag::kMinComputeCapability >= 80 && kIsHalf
? 4
: DefaultConfig::kStages,
kSplitKSerial,
Operator>;
using WarpIteratorA = typename cutlass::gemm::threadblock::
DefaultWarpIteratorAFromSharedMemory<
typename DefaultGemm::Mma::Policy::Operator::Shape, // WarpShape
typename DefaultGemm::Mma::Policy::Operator::InstructionShape,
typename DefaultGemm::Mma::Policy::Operator::IteratorA,
typename DefaultGemm::Mma::Policy>::WarpIterator;
using DefaultMmaFromSmem =
typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
typename DefaultGemm::Mma,
MM0::AccumulatorSharedStorage::Shape::kN, // kMaxK
WarpIteratorA,
false>; // kScaleOperandA
using Mma = typename DefaultMmaFromSmem::Mma;
using IteratorB = typename Mma::IteratorB;
using WarpCount = typename Mma::WarpCount;
static_assert(WarpCount::kCount == kNumWarpsPerBlock, "");
using DefaultEpilogue = typename DefaultGemm::Epilogue;
using OutputTileIterator =
typename cutlass::epilogue::threadblock::PredicatedTileIterator<
typename DefaultEpilogue::OutputTileIterator::ThreadMap,
output_t>;
using OutputTileIteratorAccum =
typename cutlass::epilogue::threadblock::PredicatedTileIterator<
typename DefaultEpilogue::OutputTileIterator::ThreadMap,
output_accum_t>;
};
/// Define the kernel in terms of the default kernel
using FMHAKernel = kernel::FMHAGrouped<
MM0,
MM1,
scalar_t,
accum_t,
output_t,
output_accum_t,
kSingleValueIteration,
GroupScheduleMode_
>;
};
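// Example instantiation (hypothetical tile sizes, shown for illustration only):
//
//   using Kernel = cutlass::gemm::kernel::DefaultFMHAGrouped<
//       cutlass::half_t,      // scalar_t
//       cutlass::arch::Sm80,  // ArchTag
//       true,                 // isAligned
//       64, 64                // kQueriesPerBlock, kKeysPerBlock
//       >::FMHAKernel;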
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/default_fmha_grouped.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/default_fmha_grouped.h",
"repo_id": "examples",
"token_count": 3924
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Tools and utilities to store a GEMM output in shmem, and to use that
     output as operand A for another GEMM back-to-back
*/
#pragma once
#include "cutlass/aligned_buffer.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/platform/platform.h"
#include "cutlass/transform/threadblock/vector_iterator.h"
#include "../epilogue/epilogue_thread_apply_logsumexp.h"
#include "../gemm/mma_accum_lambda_iterator.h"
#include "../gemm_kernel_utils.h"
#include "../iterators/default_warp_iterator_from_smem.h"
#include "../iterators/make_residual_last.h"
#include "../iterators/transpose_warp_iterator.h"
#include "../iterators/warp_iterator_from_smem.h"
#include "cutlass/epilogue/threadblock/epilogue_smem_accumulator.h"
#include "cutlass/gemm/threadblock/mma_base.h"
#include "cutlass/gemm/threadblock/mma_multistage.h"
#include "cutlass/gemm/threadblock/mma_pipelined.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h"
namespace cutlass {
namespace gemm {
namespace threadblock {
/// Shared storage object needed by accumulator
/// From 13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h
template <
typename Shape_,
typename Element_,
typename Layout_,
typename Padding_>
class AccumulatorSharedStorage {
public:
//
// Type definitions
//
using Shape = Shape_;
using Element = Element_;
using Layout = Layout_;
using Padding = Padding_;
/// Tensor reference to the accumulator
using TensorRefAccum = cutlass::TensorRef<Element, Layout>;
/// Shape of the accumulator matrix in shared memory
using ShapeAccum = cutlass::
MatrixShape<Shape::kM + Padding::kRow, Shape::kN + Padding::kColumn>;
public:
//
// Data members
//
/// Buffer for accumulator
cutlass::AlignedBuffer<Element, ShapeAccum::kCount> accum;
public:
//
// Methods
//
/// Returns a layout object for the Accum matrix
CUTLASS_DEVICE
static Layout LayoutAccum() {
return Layout::packed({ShapeAccum::kRow, ShapeAccum::kColumn});
}
/// Returns a TensorRef to the Accumulator
CUTLASS_HOST_DEVICE
TensorRefAccum accum_ref() {
return TensorRefAccum{accum.data(), LayoutAccum()};
}
};
////////////////////////////////////////////////////////////////////////////////
// Taken from
// https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
    // Maximum K dimension - also the extent of the shared-memory buffer
    // holding operand A
int kMaxK_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Layout in shared-memory of operand A
typename SmemLayoutA,
/// Used for partial specialization
typename Enable = bool>
class MmaBaseFromSharedMemory {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
static constexpr int kMaxK = kMaxK_;
///< Policy describing tuning details
using Policy = Policy_;
//
// Dependent types
//
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = GemmShape<
Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
using WarpCount1 = WarpCount;
  /// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);
static int const kWarpGemmIterations1 = kWarpGemmIterations;
/// Number of stages
static int const kStages = Stages;
/// If this is true, we fill the entire shmem buffer at start
/// and don't need to iterate through it in a circular fashion
static bool const kSmemContainsEntireB = kMaxK <= Shape::kK * kStages;
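  // For example (hypothetical values): with Shape::kK = 32, kStages = 3, and
  // kMaxK = 64, we have 64 <= 32 * 3, so the whole B operand fits in the
  // shared-memory buffer and no circular wrapping is needed.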
/// Tensor reference to the A operand
using TensorRefA = TensorRef<typename Operator::ElementA, SmemLayoutA>;
/// Tensor reference to the B operand
using TensorRefB =
TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
class SharedStorage {
public:
//
// Type definitions
//
/// Shape of the B matrix operand in shared memory
using ShapeB = MatrixShape<
Shape::kK * kStages + Policy::SmemPaddingB::kRow,
Shape::kN + Policy::SmemPaddingB::kColumn>;
public:
//
// Data members
//
/// Buffer for B operand
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
public:
//
// Methods
//
/// Returns a layout object for the B matrix
CUTLASS_HOST_DEVICE
static typename Operator::LayoutB LayoutB() {
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
}
/// Returns a TensorRef to the B operand
CUTLASS_HOST_DEVICE
TensorRefB operand_B_ref() {
return TensorRefB{operand_B.data(), LayoutB()};
}
};
protected:
//
// Data members
//
// /// Iterator to load a warp-scoped tile of A operand from shared memory
// typename Operator::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator::IteratorB warp_tile_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
MmaBaseFromSharedMemory(
///< Shared storage needed for internal use by threadblock-scoped GEMM
TensorRefB& b_tile,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: warp_tile_iterator_B_(b_tile, lane_idx) {}
};
namespace {
// Has the necessary trait compliance with WarpIteratorFromSmem but doesn't do
// anything, can be default-initialized, and uses a fragment that takes up
// (almost) no space. This warp iterator is selected at compile time when
// elementwise on-the-fly scaling for operand A is disabled, in which case
// operations related to loading scale factors for operand A get wiped out by
// the compiler.
template <typename TensorRef>
class NoOpWarpIteratorScale {
public:
  // In pipelined and multistage MMA implementations we keep an array of
  // fragments. If we aren't using scaling, we don't want to waste registers on
  // fragments of scale elements, so ideally this would be sized 0.
  // Since arrays of zero-sized objects are not allowed, we use a size of 1;
  // the compiler will most likely optimize it away anyway.
using Fragment = cutlass::Array<char, 1>;
CUTLASS_HOST_DEVICE
NoOpWarpIteratorScale() {}
CUTLASS_HOST_DEVICE
NoOpWarpIteratorScale(TensorRef const&, int) {}
CUTLASS_HOST_DEVICE
NoOpWarpIteratorScale& add_tile_offset(
typename TensorRef::TensorCoord const&) {
return *this;
}
CUTLASS_HOST_DEVICE
NoOpWarpIteratorScale& operator++() {
return *this;
}
CUTLASS_DEVICE
void load(Fragment&) const {}
};
// if scaling is enabled, performs fragment elementwise multiplication between
// fragment and its scaling factor.
template <typename Fragment, typename FragmentScale, bool ScalingEnabled>
class FragmentElementwiseScaler;
// specialization for scaling being enabled.
template <typename Fragment, typename FragmentScale>
class FragmentElementwiseScaler<Fragment, FragmentScale, true> {
public:
// cast scale_frag to correct type then apply elementwise to fragment
CUTLASS_DEVICE
static Fragment apply(Fragment frag, FragmentScale const& scale_frag) {
Fragment converted_scale_frag = cutlass::NumericArrayConverter<
typename Fragment::Element,
typename FragmentScale::Element,
FragmentScale::kElements>()(scale_frag);
return cutlass::multiplies<Fragment>()(frag, converted_scale_frag);
}
};
// specialization for scaling being disabled. doesn't do anything and should
// just get wiped out by the compiler.
template <typename Fragment, typename FragmentScale>
class FragmentElementwiseScaler<Fragment, FragmentScale, false> {
public:
CUTLASS_DEVICE
static Fragment apply(Fragment frag, FragmentScale const&) {
return frag;
}
};
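// Usage sketch: the MMA classes below alias this as FragmentAScaler and call
// FragmentAScaler::apply(warp_frag_A, warp_frag_A_scale); when scaling is
// disabled the call collapses to returning warp_frag_A unchanged.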
} // namespace
////////////////////////////////////////////////////////////////////////////////
// Taken from
// https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined_smem_accumulator.h
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
// BEGIN smem
/// Iterates over the intermediate accumulator tile in shared memory
typename WarpIteratorA_,
/// whether or not to perform elementwise multiplication of A
// by another matrix (A_scale) that is also kept in shared memory prior
// to matmul A @ B
bool ScaleOperandA_,
/// Max GEMM problem size in K dimension
int MaxK,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Transformation applied to B operand
typename TransformB_ = NumericArrayConverter<
typename SmemIteratorB_::Element,
typename IteratorB_::Element,
IteratorB_::Fragment::kElements>,
/// Used for partial specialization
typename Enable = bool>
class MmaPipelinedFromSharedMemory : public MmaBaseFromSharedMemory<
Shape_,
MaxK,
Policy_,
2,
typename WarpIteratorA_::Layout> {
public:
///< Base class
using Base = MmaBaseFromSharedMemory<
Shape_,
MaxK,
Policy_,
2,
typename WarpIteratorA_::Layout>;
using Shape =
Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
static constexpr bool ScaleOperandA = ScaleOperandA_;
using WarpIteratorA = WarpIteratorA_;
  ///< Loads fragments of A_scale from shared memory if operand A scaling is
  ///< enabled; otherwise a no-op.
using WarpIteratorAScale = typename cutlass::platform::conditional<
ScaleOperandA,
WarpIteratorA,
NoOpWarpIteratorScale<typename WarpIteratorA::TensorRef>>::type;
using IteratorB =
IteratorB_; ///< Iterates over tiles of B operand in global memory
using ElementC = ElementC_; ///< Data type of accumulator matrix
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
using Policy = Policy_; ///< Policy describing tuning details
using SmemIteratorB = SmemIteratorB_;
using TransformB = TransformB_;
//
// Dependent types
//
/// Fragment of operand B loaded from global memory
using FragmentB = typename IteratorB::Fragment;
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Obtain the arch tag from the warp-level operator
using ArchTag = typename Policy::Operator::ArchTag;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
  // Statically assert that kStages for MmaPipelined is two (double-buffered pipeline)
static_assert(
(Base::kStages == 2),
"MmaPipelined requires kStages set to value 2");
private:
using WarpFragmentA = typename Operator::FragmentA;
/// fragment type of OperandA elementwise scaling matrix. (almost) empty
/// if operand A scaling is disabled.
using WarpFragmentAScale = typename WarpIteratorAScale::Fragment;
using WarpFragmentB = typename Operator::FragmentB;
/// applies scaling factor to operand A fragment if operand A scaling is
/// enabled. otherwise no-op.
using FragmentAScaler = FragmentElementwiseScaler<
WarpFragmentA,
WarpFragmentAScale,
ScaleOperandA>;
protected:
// /// Iterator to write threadblock-scoped tile of A operand to shared memory
// SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
/// Iterator to load a warp-scoped tile of A operand from intermediate
/// accumulator tile
WarpIteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of A_scale from intermediate
/// accumulator tile (only used if ScaleOperandA_ is true)
WarpIteratorAScale warp_tile_iterator_A_scale_;
public:
/// constructor for MMA with operand A scaling enabled.
CUTLASS_DEVICE
MmaPipelinedFromSharedMemory(
typename Base::TensorRefA a, // Operand A in shared memory
typename Base::TensorRefA a_scale, // Operand A_scale in shared memory
typename Base::TensorRefB
b_staging, // staging memory for loading tiles of B
int thread_idx,
int warp_idx,
int lane_idx)
: Base(b_staging, thread_idx, warp_idx, lane_idx),
warp_tile_iterator_A_(a, lane_idx),
warp_tile_iterator_A_scale_(a_scale, lane_idx),
smem_iterator_B_(b_staging, thread_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
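    // For example (hypothetical 2x2x1 warp arrangement): with
    // WarpCount = {2, 2, 1} and warp_idx = 3, this gives warp_idx_mn = 3,
    // warp_idx_k = 0, warp_idx_m = 1, and warp_idx_n = 1.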
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_A_scale_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
/// Construct from tensor references
CUTLASS_DEVICE
MmaPipelinedFromSharedMemory(
typename Base::TensorRefA a, ///< Operand A in shared memory
typename Base::TensorRefB b_staging, ///< staging memory for loading B
int thread_idx, ///< ID within the threadblock
int warp_idx, ///< ID of warp
int lane_idx) ///< ID of each thread within a warp
: Base(b_staging, thread_idx, warp_idx, lane_idx),
warp_tile_iterator_A_(a, lane_idx),
smem_iterator_B_(b_staging, thread_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
  // For API compatibility with MmaMultistageFromSharedMemory,
  // but not supported as it worsens perf: older GPUs (< Sm80) don't
  // support async transfers and would have to waste registers
CUTLASS_DEVICE
void set_prologue_done(bool value) {}
CUTLASS_DEVICE
static void prologue(
typename Base::SharedStorage& shared_storage,
IteratorB iterator_B1,
int thread_idx,
int problem_size_0_n) {}
CUTLASS_DEVICE
static void drain_cp_asyncs() {}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
int gemm_k_iterations, ///< number of iterations of the mainloop
FragmentC& accum, ///< destination accumulator tile
// IteratorA iterator_A, ///< iterator over A
// operand in global memory
IteratorB iterator_B, ///< iterator over B operand in global memory
FragmentC const& src_accum, ///< source accumulator tile
// TransformA transform_A = TransformA(), ///< transformation
// applied to A fragment
TransformB transform_B =
TransformB()) { ///< transformation applied to B fragment
//
// Prologue
//
// Perform accumulation in the 'd' output operand
accum = src_accum;
FragmentB tb_frag_B;
tb_frag_B.clear();
    // The last kblock is loaded in the prologue
iterator_B.set_residual_tile(gemm_k_iterations == 1);
iterator_B.load(tb_frag_B);
++iterator_B;
this->smem_iterator_B_.store(transform_B(tb_frag_B));
++this->smem_iterator_B_;
__syncthreads();
// remember that WarpFragmentAScale and WarpIteratorAScale are empty/no-op
// if scaling is disabled.
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpFragmentA warp_frag_A[2];
WarpFragmentAScale warp_frag_A_scale[2];
WarpFragmentB warp_frag_B[2];
warp_frag_A[0].clear();
warp_frag_A_scale[0].clear();
warp_frag_B[0].clear();
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_A[0]);
this->warp_tile_iterator_A_scale_.load(warp_frag_A_scale[0]);
this->warp_tile_iterator_B_.load(warp_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_A_scale_;
++this->warp_tile_iterator_B_;
Operator warp_mma;
int smem_write_stage_idx = 1;
// Avoid reading out of bounds
iterator_B.set_residual_tile(gemm_k_iterations == 2);
iterator_B.clear_mask(gemm_k_iterations <= 1);
// Issue loads during the first warp-level matrix multiply-add *AFTER*
// issuing shared memory loads (which have the tightest latency
// requirement).
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
bool hasNext = true;
if (warp_mma_k == Base::kWarpGemmIterations - 1) {
if (gemm_k_iterations > 1) {
// Write fragments to shared memory
this->smem_iterator_B_.store(transform_B(tb_frag_B));
}
__syncthreads();
++this->smem_iterator_B_;
          // Add negative offsets to return iterators to the 'start' of the
          // circular buffer in shared memory (SMEM). Don't reset iterator A,
          // as we are continuing our iteration at this point
if (smem_write_stage_idx == 1) {
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
} else {
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
}
smem_write_stage_idx ^= 1;
hasNext = gemm_k_iterations > 1;
}
// Only read the next if we need to
if (hasNext) {
this->warp_tile_iterator_B_.set_kgroup_index(
(warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_A_scale_.load(
warp_frag_A_scale[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_A_scale_;
++this->warp_tile_iterator_B_;
if (warp_mma_k == 0) {
iterator_B.load(tb_frag_B);
++iterator_B;
// Avoid reading out of bounds if this was the last loop iteration
iterator_B.set_residual_tile(gemm_k_iterations == 3);
iterator_B.clear_mask(gemm_k_iterations <= 2);
}
}
warp_mma(
accum,
FragmentAScaler::apply(
warp_frag_A[warp_mma_k % 2], warp_frag_A_scale[warp_mma_k % 2]),
warp_frag_B[warp_mma_k % 2],
accum);
}
}
}
};
////////////////////////////////////////////////////////////////////////////////
// Taken from
// https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_multistage_smem_accumulator.h
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape1_,
/// Iterates over the intermediate accumulator tile in shared memory
typename WarpIteratorA1_,
/// whether or not to perform elementwise multiplication of A
// by another matrix (A_scale) that is also kept in shared memory prior
// to matmul A @ B
bool ScaleOperandA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB1_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB1_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB1,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy1_,
/// Number of stages,
int Stages_,
int kMaxK_,
/// Used for partial specialization
typename Enable = bool>
class MmaMultistageFromSharedMemory : public MmaBaseFromSharedMemory<
Shape1_,
kMaxK_,
Policy1_,
Stages_,
typename WarpIteratorA1_::Layout> {
public:
///< Base class
using Base = MmaBaseFromSharedMemory<
Shape1_,
kMaxK_,
Policy1_,
Stages_,
typename WarpIteratorA1_::Layout>;
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape1 = Shape1_;
///< Iterates over tiles of B operand in global memory
using IteratorB1 = IteratorB1_;
using IteratorB = IteratorB1;
///< Policy describing tuning details
using Policy1 = Policy1_;
using SmemIteratorB1 = SmemIteratorB1_;
using WarpIteratorA1 = WarpIteratorA1_; ///< Iterates over the intermediate
///< accumulator tile in shared memory
static constexpr bool ScaleOperandA = ScaleOperandA_;
///< warp level iterator over A_scale matrix tile kept in shared memory.
  ///< If elementwise A scaling is disabled then everything this does is a no-op.
using WarpIteratorAScale = typename cutlass::platform::conditional<
ScaleOperandA,
WarpIteratorA1,
NoOpWarpIteratorScale<typename WarpIteratorA1::TensorRef>>::type;
///< Data type of accumulator matrix
using ElementC = ElementC_;
///< Layout of accumulator matrix
using LayoutC = LayoutC_;
static cutlass::arch::CacheOperation::Kind const kCacheOpB1 = CacheOpB1;
static constexpr bool kSmemContainsEntireB = Base::kSmemContainsEntireB;
//
// Dependent types
//
/// Fragment of accumulator tile
using FragmentC1 = typename Policy1::Operator::FragmentC;
using FragmentC = FragmentC1;
/// Warp-level Mma
using Operator1 = typename Policy1::Operator;
/// Minimum architecture is Sm80 to support cp.async
using ArchTag = arch::Sm80;
/// Complex transform on B operand
static ComplexTransform const kTransformB1 = Operator1::kTransformB;
/// Internal structure exposed for introspection.
struct Detail {
static_assert(
Base::kWarpGemmIterations1 > 1,
"The pipelined structure requires at least two warp-level "
"GEMM operations.");
/// Number of cp.async instructions to load one stage of operand B
static int const TBLoadIterationsB1 =
IteratorB1::ThreadMap::Iterations::kCount;
    /// Number of cp.async instructions to load one group of operand B
static int const kAccessesPerGroupB1 =
(TBLoadIterationsB1 + Base::kWarpGemmIterations1 - 1) /
Base::kWarpGemmIterations1;
};
static constexpr int kNumStagesConcurrentLoad =
kSmemContainsEntireB ? Base::kStages : Base::kStages - 1;
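  // If the entire B operand fits in shared memory, all kStages stages can be
  // prefetched in the prologue; otherwise one stage is kept free so the
  // mainloop can overlap global->shared copies with math.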
private:
using WarpLoadedFragmentA1 = typename Operator1::FragmentA;
/// fragment of OperandA scale matrix. if operand A scaling is disabled this
/// is (almost) empty.
using WarpLoadedFragmentA1Scale = typename WarpIteratorAScale::Fragment;
using WarpLoadedFragmentB1 = typename Operator1::FragmentB;
using WarpTransformedFragmentA1 = typename Operator1::TransformedFragmentA;
using WarpTransformedFragmentB1 = typename Operator1::TransformedFragmentB;
/// applies elementwise scaling to fragment of A. if operand A scaling is
/// disabled this is a no-op.
using FragmentAScaler = FragmentElementwiseScaler<
WarpLoadedFragmentA1,
WarpLoadedFragmentA1Scale,
ScaleOperandA>;
private:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A1 operand from intermediate
/// accumulator tile
WarpIteratorA1 warp_tile_iterator_A1_;
/// Iterator to load a warp-scoped tile of A1_scale operand from shared memory
/// if operand A scaling is disabled everything this does is a no-op.
WarpIteratorAScale warp_tile_iterator_A1_scale_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB1 smem_iterator_B1_;
bool prologue_done_;
public:
/// constructor for MMA with operand A scaling enabled.
CUTLASS_DEVICE
MmaMultistageFromSharedMemory(
typename Base::TensorRefA a,
typename Base::TensorRefA a_scale,
typename Base::TensorRefB b_tile,
int thread_idx,
int warp_idx,
int lane_idx)
: Base(b_tile, thread_idx, warp_idx, lane_idx),
warp_tile_iterator_A1_(a, lane_idx),
warp_tile_iterator_A1_scale_(a_scale, lane_idx),
smem_iterator_B1_(b_tile, thread_idx),
prologue_done_(false) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn_1 =
warp_idx % (Base::WarpCount1::kM * Base::WarpCount1::kN);
int warp_idx_k_1 = warp_idx / (Base::WarpCount1::kM * Base::WarpCount1::kN);
int warp_idx_m_1 = warp_idx_mn_1 % Base::WarpCount1::kM;
int warp_idx_n_1 = warp_idx_mn_1 / Base::WarpCount1::kM;
// Add per-warp offsets in units of warp-level tiles
warp_tile_iterator_A1_.add_tile_offset(
{warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1});
warp_tile_iterator_A1_scale_.add_tile_offset(
{warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations1 * warp_idx_k_1, warp_idx_n_1});
}
/// Construct from tensor references
CUTLASS_DEVICE
MmaMultistageFromSharedMemory(
typename Base::TensorRefA a,
typename Base::TensorRefB b_tile,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: Base(b_tile, thread_idx, warp_idx, lane_idx),
warp_tile_iterator_A1_(a, lane_idx),
smem_iterator_B1_(b_tile, thread_idx),
prologue_done_(false) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn_1 =
warp_idx % (Base::WarpCount1::kM * Base::WarpCount1::kN);
int warp_idx_k_1 = warp_idx / (Base::WarpCount1::kM * Base::WarpCount1::kN);
int warp_idx_m_1 = warp_idx_mn_1 % Base::WarpCount1::kM;
int warp_idx_n_1 = warp_idx_mn_1 / Base::WarpCount1::kM;
// Add per-warp offsets in units of warp-level tiles
warp_tile_iterator_A1_.add_tile_offset(
{warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations1 * warp_idx_k_1, warp_idx_n_1});
}
CUTLASS_DEVICE
void set_prologue_done(bool value) {
prologue_done_ = value;
}
CUTLASS_DEVICE
static void prologue(
typename Base::SharedStorage& shared_storage,
IteratorB iterator_B1,
int thread_idx,
int problem_size_0_n) {
SmemIteratorB1 smem_iterator_B1(shared_storage.operand_B_ref(), thread_idx);
_prologue(
iterator_B1,
(problem_size_0_n + Base::Shape::kK - 1) / Base::Shape::kK,
smem_iterator_B1);
}
CUTLASS_DEVICE
static void drain_cp_asyncs() {
    // Commit and drain all pending and predicated cp.async instructions from
    // the GEMM mainloop
cutlass::arch::cp_async_fence();
cutlass::arch::cp_async_wait<0>();
__syncthreads();
}
CUTLASS_DEVICE
void copy_tiles_and_advance_1(
IteratorB1& iterator_B1,
int group_start_B1 = 0) {
iterator_B1.set_iteration_index(
group_start_B1 * IteratorB1::kAccessesPerVector);
this->smem_iterator_B1_.set_iteration_index(group_start_B1);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::kAccessesPerGroupB1; ++j) {
if (group_start_B1 + j < Detail::TBLoadIterationsB1) {
typename IteratorB1::AccessType* dst_ptr =
reinterpret_cast<typename IteratorB1::AccessType*>(
this->smem_iterator_B1_.get());
int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value *
IteratorB1::ThreadMap::kElementsPerAccess /
IteratorB1::kAccessesPerVector / 8;
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
auto gmem_ptr = iterator_B1.get();
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>(
dst_ptr + v, gmem_ptr, iterator_B1.valid());
++iterator_B1;
}
++this->smem_iterator_B1_;
}
}
}
CUTLASS_DEVICE
static void _prologue(
IteratorB& iterator_B1,
int32_t gemm_k_iterations_1,
SmemIteratorB1& smem_iterator_B1_) {
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < kNumStagesConcurrentLoad;
++stage, --gemm_k_iterations_1) {
iterator_B1.set_residual_tile(gemm_k_iterations_1 == 1);
iterator_B1.clear_mask(gemm_k_iterations_1 == 0);
iterator_B1.set_iteration_index(0);
smem_iterator_B1_.set_iteration_index(0);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) {
typename IteratorB1::AccessType* dst_ptr =
reinterpret_cast<typename IteratorB1::AccessType*>(
smem_iterator_B1_.get());
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
int const kSrcBytes =
sizeof_bits<typename IteratorB1::Element>::value *
IteratorB1::ThreadMap::kElementsPerAccess /
IteratorB1::kAccessesPerVector / 8;
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>(
dst_ptr + v, iterator_B1.get(), iterator_B1.valid());
++iterator_B1;
}
++smem_iterator_B1_;
}
// Move to the next stage
iterator_B1.add_tile_offset({1, 0});
smem_iterator_B1_.add_tile_offset({1, 0});
// Defines the boundary of a stage of cp.async.
cutlass::arch::cp_async_fence();
}
iterator_B1.set_residual_tile(gemm_k_iterations_1 == 1);
iterator_B1.clear_mask(gemm_k_iterations_1 == 0);
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
///< problem size of GEMM
int gemm_k_iterations_1_,
///< destination accumulator tile
FragmentC1& accum,
///< iterator over B1 operand in global memory
IteratorB1 iterator_B1,
///< initial value of accumulator
FragmentC1 const& src_accum) {
// 2nd Gemm
//
// Prologue
//
// Perform accumulation in the 'd' output operand
accum = src_accum;
if (!prologue_done_) {
_prologue(iterator_B1, gemm_k_iterations_1_, smem_iterator_B1_);
} else if (!kSmemContainsEntireB) {
// Restore the iterators increments
int gemm_k_iterations_1 = gemm_k_iterations_1_;
// Issue several complete stages
CUTLASS_PRAGMA_UNROLL
for (int stage = 0; stage < kNumStagesConcurrentLoad;
++stage, --gemm_k_iterations_1) {
iterator_B1.set_iteration_index(0);
this->smem_iterator_B1_.set_iteration_index(0);
// Load for operand B
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) {
CUTLASS_PRAGMA_UNROLL
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
++iterator_B1;
}
++this->smem_iterator_B1_;
}
iterator_B1.add_tile_offset({1, 0});
this->smem_iterator_B1_.add_tile_offset({1, 0});
}
iterator_B1.set_residual_tile(gemm_k_iterations_1 <= 1);
iterator_B1.clear_mask(gemm_k_iterations_1 <= 0);
}
// DEPBAR+SYNC
cutlass::arch::cp_async_wait<kNumStagesConcurrentLoad - 1>();
__syncthreads();
// remember that WarpFragmentAScale and WarpIteratorAScale are no-op/empty
// if scaling is disabled.
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpLoadedFragmentA1 warp_loaded_frag_A1[2];
WarpLoadedFragmentA1Scale warp_loaded_frag_A1_scale[2];
WarpLoadedFragmentB1 warp_loaded_frag_B1[2];
WarpTransformedFragmentA1 warp_transformed_frag_A1[2];
WarpTransformedFragmentB1 warp_transformed_frag_B1[2];
Operator1 warp_mma1;
warp_tile_iterator_A1_.load(warp_loaded_frag_A1[0]);
++warp_tile_iterator_A1_;
warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[0]);
++warp_tile_iterator_A1_scale_;
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_B_.load(warp_loaded_frag_B1[0]);
++this->warp_tile_iterator_B_;
int smem_write_stage_idx = Base::kStages - 1;
int smem_read_stage_idx = 0;
warp_mma1.transform(
warp_transformed_frag_A1[0],
warp_transformed_frag_B1[0],
FragmentAScaler::apply(
warp_loaded_frag_A1[0], warp_loaded_frag_A1_scale[0]),
warp_loaded_frag_B1[0]);
// tf32x3 kernels use staging accumulation. warp_mma uses a temporary
// accumulator and this temporary accumulator is added to the final
// accumulator once in every mainloop iteration.
plus<FragmentC1> plus_accum;
FragmentC1 tmp_accum;
if (platform::is_same<
typename Operator1::MathOperator,
arch::OpMultiplyAddFastF32>::value ||
platform::is_same<
typename Operator1::MathOperator,
arch::OpMultiplyAddComplexFastF32>::value) {
tmp_accum.clear();
}
//
// Mainloop
//
CUTLASS_PRAGMA_UNROLL
for (int gemm_k_iterations_1 = gemm_k_iterations_1_ - (Base::kStages - 1);
gemm_k_iterations_1 > (-Base::kStages + 1);
gemm_k_iterations_1--) {
//
// Loop over GEMM K dimension
//
// Computes a warp-level GEMM on data held in shared memory
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1;
++warp_mma_k) {
// Load warp-level tile from accumulator fragment (A)
// or shared memory (operand B)
this->warp_tile_iterator_B_.set_kgroup_index(
(warp_mma_k + 1) % Base::kWarpGemmIterations1);
        // Skip warp tile loading for the last kgroup (we are out of the buffer)
if (gemm_k_iterations_1 > (-Base::kStages + 2) ||
warp_mma_k < Base::kWarpGemmIterations1 - 1) {
warp_tile_iterator_A1_.load(
warp_loaded_frag_A1[(warp_mma_k + 1) % 2]);
warp_tile_iterator_A1_scale_.load(
warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(
warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
}
++warp_tile_iterator_A1_;
++warp_tile_iterator_A1_scale_;
++this->warp_tile_iterator_B_;
if (warp_mma_k > 0)
warp_mma1.transform(
warp_transformed_frag_A1[warp_mma_k % 2],
warp_transformed_frag_B1[warp_mma_k % 2],
FragmentAScaler::apply(
warp_loaded_frag_A1[warp_mma_k % 2],
warp_loaded_frag_A1_scale[warp_mma_k % 2]),
warp_loaded_frag_B1[warp_mma_k % 2]);
if (platform::is_same<
typename Operator1::MathOperator,
arch::OpMultiplyAddFastF32>::value ||
platform::is_same<
typename Operator1::MathOperator,
arch::OpMultiplyAddComplexFastF32>::value) {
warp_mma1(
tmp_accum,
warp_transformed_frag_A1[warp_mma_k % 2],
warp_transformed_frag_B1[warp_mma_k % 2],
tmp_accum);
if (warp_mma_k == 0) {
accum = plus_accum(accum, tmp_accum);
tmp_accum.clear();
}
} else {
warp_mma1(
accum,
warp_transformed_frag_A1[warp_mma_k % 2],
warp_transformed_frag_B1[warp_mma_k % 2],
accum);
}
        // Issue global->shared copies for this stage
if (warp_mma_k < Base::kWarpGemmIterations1 - 1) {
int group_start_iteration_B1;
group_start_iteration_B1 = warp_mma_k * Detail::kAccessesPerGroupB1;
if (!kSmemContainsEntireB) {
copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1);
}
}
if (warp_mma_k + 2 == Base::kWarpGemmIterations1) {
int group_start_iteration_B1;
group_start_iteration_B1 =
(warp_mma_k + 1) * Detail::kAccessesPerGroupB1;
if (!kSmemContainsEntireB) {
copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1);
}
// Inserts a memory fence between stages of cp.async instructions.
cutlass::arch::cp_async_fence();
// Waits until kStages-2 stages have committed.
arch::cp_async_wait<kNumStagesConcurrentLoad - 1>();
__syncthreads();
// Move to the next stage
iterator_B1.add_tile_offset({1, 0});
this->smem_iterator_B1_.add_tile_offset({1, 0});
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (!kSmemContainsEntireB) {
if (smem_write_stage_idx == (Base::kStages - 1)) {
this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0});
smem_write_stage_idx = 0;
} else {
++smem_write_stage_idx;
}
if (smem_read_stage_idx == (Base::kStages - 1)) {
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy1::kPartitionsK *
Base::kWarpGemmIterations1,
0});
smem_read_stage_idx = 0;
} else {
++smem_read_stage_idx;
}
}
iterator_B1.set_residual_tile(gemm_k_iterations_1 == 2);
iterator_B1.clear_mask(gemm_k_iterations_1 == 1);
}
// Do any conversions feeding the first stage at the end of the loop so
// we can start right away on mma instructions
if (warp_mma_k + 1 == Base::kWarpGemmIterations1)
warp_mma1.transform(
warp_transformed_frag_A1[(warp_mma_k + 1) % 2],
warp_transformed_frag_B1[(warp_mma_k + 1) % 2],
FragmentAScaler::apply(
warp_loaded_frag_A1[(warp_mma_k + 1) % 2],
warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]),
warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
}
}
if (platform::is_same<
typename Operator1::MathOperator,
arch::OpMultiplyAddFastF32>::value ||
platform::is_same<
typename Operator1::MathOperator,
arch::OpMultiplyAddComplexFastF32>::value) {
accum = plus_accum(accum, tmp_accum);
}
}
};
// Converts a "regular" Mma into its counterpart that reads operand A from shared memory
template <
typename Mma_,
int kMaxK,
typename WarpIteratorA_,
/// whether or not to apply elementwise multiplication of operand A by
/// another matrix in shared memory before usage in A @ B
bool kScaleOperandA,
bool kTransposeA = false>
struct DefaultMmaFromSharedMemory;
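// Usage sketch: given a threadblock-scoped MmaPipelined or MmaMultistage type,
// this trait rebuilds it so that operand A is read from an accumulator tile
// already resident in shared memory (see the MM1::DefaultMmaFromSmem alias in
// default_fmha_grouped.h above for a concrete instantiation).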
// Mma pipelined
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
typename WarpIteratorA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Transformation applied to A operand
typename TransformA_,
/// Transformation applied to B operand
typename TransformB_,
// Max MMA problem size K
int kMaxK,
/// whether or not to apply elementwise multiplication of operand A by
/// another matrix in shared memory before usage in A @ B
bool kScaleOperandA,
bool kTransposeA>
struct DefaultMmaFromSharedMemory<
MmaPipelined<
Shape_,
IteratorA_,
SmemIteratorA_,
IteratorB_,
SmemIteratorB_,
ElementC_,
LayoutC_,
Policy_,
TransformA_,
TransformB_>,
kMaxK,
WarpIteratorA_,
kScaleOperandA,
kTransposeA> {
using RegularMma = MmaPipelined<
Shape_,
IteratorA_,
SmemIteratorA_,
IteratorB_,
SmemIteratorB_,
ElementC_,
LayoutC_,
Policy_,
TransformA_,
TransformB_>;
using WarpShape = typename Policy_::Operator::Shape;
using InstructionShape = typename Policy_::Operator::InstructionShape;
using ArchMmaOperator = typename Policy_::Operator;
static constexpr bool kIsTransposedA = false;
using WarpIteratorA = WarpIteratorA_;
using IteratorB =
typename cutlass::transform::threadblock::MakeIteratorResidualLast<
IteratorB_>::Iterator;
using Mma = typename cutlass::gemm::threadblock::MmaPipelinedFromSharedMemory<
Shape_,
WarpIteratorA,
kScaleOperandA,
kMaxK,
IteratorB,
SmemIteratorB_,
ElementC_,
LayoutC_,
Policy_>;
};
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
typename WarpIteratorA_,
/// Cache operation for operand A
cutlass::arch::CacheOperation::Kind CacheOpA,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Cache operation for operand B
cutlass::arch::CacheOperation::Kind CacheOpB,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Use zfill or predicate for out-of-bound cp.async
SharedMemoryClearOption SharedMemoryClear,
int kMaxK,
/// whether or not to apply elementwise multiplication of operand A by
/// another matrix in shared memory before usage in A @ B
bool kScaleOperandA,
bool kTransposeA>
struct DefaultMmaFromSharedMemory<
MmaMultistage<
Shape_,
IteratorA_,
SmemIteratorA_,
CacheOpA,
IteratorB_,
SmemIteratorB_,
CacheOpB,
ElementC_,
LayoutC_,
Policy_,
Stages,
SharedMemoryClear>,
kMaxK,
WarpIteratorA_,
kScaleOperandA,
kTransposeA> {
using RegularMma = MmaMultistage<
Shape_,
IteratorA_,
SmemIteratorA_,
CacheOpA,
IteratorB_,
SmemIteratorB_,
CacheOpB,
ElementC_,
LayoutC_,
Policy_,
Stages,
SharedMemoryClear>;
using WarpShape = typename Policy_::Operator::Shape;
using InstructionShape = typename Policy_::Operator::InstructionShape;
using WarpIteratorTranspose = TransposeWarpIterator<WarpIteratorA_>;
static constexpr bool kIsTransposedA =
WarpIteratorTranspose::kSupportsTranspose && kTransposeA;
using WarpIteratorA = typename platform::conditional<
kIsTransposedA,
typename WarpIteratorTranspose::Iterator,
WarpIteratorA_>::type;
// Reduce the number of stages if we don't need that many
static int constexpr kStagesMax =
(kMaxK + int(Shape_::kK) - 1) / int(Shape_::kK);
static int constexpr kStages = cutlass::const_min(Stages, kStagesMax);
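  // For example (hypothetical values): with Shape_::kK = 32, kMaxK = 96, and
  // Stages = 4, kStagesMax = (96 + 31) / 32 = 3, so kStages = min(4, 3) = 3.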
using IteratorB =
typename cutlass::transform::threadblock::MakeIteratorResidualLast<
IteratorB_>::Iterator;
using Mma =
typename cutlass::gemm::threadblock::MmaMultistageFromSharedMemory<
Shape_,
WarpIteratorA,
kScaleOperandA,
IteratorB,
SmemIteratorB_,
RegularMma::kCacheOpB,
ElementC_,
LayoutC_,
Policy_,
kStages,
kMaxK>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename IteratorC,
typename Operator,
typename scalar_t,
typename WarpShape_,
typename ThreadblockShape_>
struct B2bGemm;
// Tensor Cores >= Sm75 specialization (Ampere ...)
template < /// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_,
typename Operator,
typename scalar_t,
typename WarpShape_,
typename ThreadblockShape_>
struct B2bGemm<
cutlass::gemm::warp::MmaTensorOpAccumulatorTileIterator<
Shape_,
Element_,
Layout_,
InstructionShape_,
OpDelta_>,
Operator,
scalar_t,
WarpShape_,
ThreadblockShape_> {
using IteratorC =
typename cutlass::gemm::warp::MmaTensorOpAccumulatorTileIterator<
Shape_,
Element_,
Layout_,
InstructionShape_,
OpDelta_>;
using FragmentC = typename IteratorC::Fragment;
using InstructionShape = InstructionShape_;
using WarpShape = WarpShape_;
using ThreadblockShape = ThreadblockShape_;
using accum_t = Element_;
using lse_scalar_t = float;
using SmemAccumulatorLayout = cutlass::layout::RowMajor;
// Iterator to load accumulators (results of matmul in registers)
using FragmentIteratorAccumulator =
cutlass::epilogue::warp::FragmentIteratorTensorOp<
WarpShape,
InstructionShape,
accum_t,
typename Operator::Policy::Operator::FragmentC,
cutlass::layout::RowMajor>;
// Iterator to store to shared-memory
using SmemIteratorD0 = typename cutlass::epilogue::warp::TileIteratorTensorOp<
WarpShape,
InstructionShape,
scalar_t, // accum_t,
SmemAccumulatorLayout>;
using AccumulatorSharedStorage =
cutlass::gemm::threadblock::AccumulatorSharedStorage<
ThreadblockShape,
typename SmemIteratorD0::Element,
typename SmemIteratorD0::TensorLayout,
typename SmemIteratorD0::Padding>;
// We need to provide an operation for the epilogue. Let's create an
// operation that does nothing (ScaleType::Nothing), just converts
// from accum_t (float) -> scalar_t (can be half)
using OutputOpNoOp = cutlass::epilogue::thread::LinearCombination<
typename SmemIteratorD0::Element, // ElementOutput
FragmentIteratorAccumulator::Fragment::kElements,
accum_t, // ElementAccumulator
typename SmemIteratorD0::Element, // ElementCompute
cutlass::epilogue::thread::ScaleType::Nothing>;
using Epilogue = cutlass::epilogue::threadblock::EpilogueSmemAccumulator<
SmemIteratorD0,
FragmentIteratorAccumulator,
SmemIteratorD0, // ScaleBiasIterator - not used
OutputOpNoOp>;
// Epilogue 2: with LSE (for backwards pass)
static int const kElementsPerAccess = 2; // TODO: Why 2?
using IteratorAccumulatorLSE =
cutlass::transform::threadblock::VectorIterator<
cutlass::transform::threadblock::PredicatedVectorAccessIterator<
// Shape
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kN>,
// WarpShape
cutlass::MatrixShape<WarpShape::kM, WarpShape::kN>,
lse_scalar_t,
cutlass::layout::RowMajor,
kElementsPerAccess>>;
using EpilogueOpApplyLSE = cutlass::epilogue::thread::ApplyLogSumExp<
scalar_t, // ElementOutput_
lse_scalar_t, // ElementLSE_
accum_t, // ElementAccumulator_
accum_t, // ElementCompute_
128 / cutlass::sizeof_bits<scalar_t>::value
// FragmentIteratorAccumulator::Fragment::kElements
// InstructionShape::kM * InstructionShape::kN / 32
>;
using EpilogueWithLSE =
cutlass::epilogue::threadblock::EpilogueSmemAccumulator<
SmemIteratorD0,
FragmentIteratorAccumulator,
IteratorAccumulatorLSE,
EpilogueOpApplyLSE>;
static void CUTLASS_DEVICE accumToSmem(
AccumulatorSharedStorage& shared_storage,
FragmentC const& accum,
int lane_id,
cutlass::MatrixCoord const& tile_coords) {
SmemIteratorD0 smem_iterator_attn(shared_storage.accum_ref(), lane_id);
smem_iterator_attn.add_tile_offset(
tile_coords *
cutlass::MatrixCoord{
SmemIteratorD0::TileIterations::kRow,
SmemIteratorD0::TileIterations::kColumn});
Epilogue epilogue;
epilogue(OutputOpNoOp({}), smem_iterator_attn, accum);
}
static void CUTLASS_DEVICE accumApplyLSEToSmem(
AccumulatorSharedStorage& shared_storage,
FragmentC& accum,
lse_scalar_t const* lse,
int32_t lse_extents,
int thread_id,
int warp_id,
int lane_id,
cutlass::MatrixCoord const& tile_coords) {
constexpr int32_t kAlignLSE = 32;
IteratorAccumulatorLSE iterator_lse(
lse,
{(int32_t)0, (int32_t)ceil_div(lse_extents, kAlignLSE) * kAlignLSE},
thread_id,
warp_id,
cutlass::MatrixCoord{0, 0} // offset
);
SmemIteratorD0 smem_iterator_attn(shared_storage.accum_ref(), lane_id);
smem_iterator_attn.add_tile_offset(
tile_coords *
cutlass::MatrixCoord{
SmemIteratorD0::TileIterations::kRow,
SmemIteratorD0::TileIterations::kColumn});
EpilogueWithLSE epilogue;
EpilogueOpApplyLSE minus_lse_exp({});
epilogue(
minus_lse_exp,
smem_iterator_attn,
accum,
// scale - unused
iterator_lse,
// bias
iterator_lse);
}
};
// Volta Specialization
// only supported for f16
template <typename Operator, typename WarpShape_, typename ThreadblockShape_>
struct B2bGemm<
cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
cutlass::MatrixShape<32, 32>,
float,
cutlass::layout::RowMajor,
cutlass::gemm::GemmShape<16, 16, 4>,
cutlass::MatrixShape<1, 1>>,
Operator,
cutlass::half_t,
WarpShape_,
ThreadblockShape_> {
using IteratorC =
cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
cutlass::MatrixShape<32, 32>,
float,
cutlass::layout::RowMajor,
cutlass::gemm::GemmShape<16, 16, 4>,
cutlass::MatrixShape<1, 1>>;
using scalar_t = cutlass::half_t;
using accum_t = IteratorC::Element;
using WarpShape = WarpShape_;
using ThreadblockShape = ThreadblockShape_;
using FragmentC = IteratorC::Fragment;
using lse_scalar_t = float;
// Storage in shared-memory for Q.Kt
using SmemAccumulatorLayout =
cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<16, 32>;
using AccumulatorSharedStorage =
cutlass::gemm::threadblock::AccumulatorSharedStorage<
ThreadblockShape,
scalar_t,
SmemAccumulatorLayout,
cutlass::MatrixShape<0, 0> // Padding
>;
using TensorRef = cutlass::TensorRef<scalar_t, SmemAccumulatorLayout>;
using Policy = typename IteratorC::Policy;
using Element = accum_t;
// Those are MmaVoltaTensorOpAccumulatorTileIterator private fields
// Let's copy their values
static int const kElementsPerPartial = 4;
using EleShapePerPatial = typename cutlass::platform::conditional<
cutlass::platform::is_same<Element, float>::value,
cutlass::MatrixShape<2, 2>,
cutlass::MatrixShape<1, 4>>::type;
static int const kElementsPerMma = 8;
static int const kAccumulatorPatials = 2;
using QuadShapePerPatialMma = cutlass::MatrixShape<4, 4>;
static void CUTLASS_DEVICE accumToSmem(
AccumulatorSharedStorage& shared_storage,
FragmentC const& accum,
int lane_id,
cutlass::MatrixCoord const& tile_coords) {
// ctor - from MmaVoltaTensorOpAccumulatorTileIterator
TensorRef ref_(shared_storage.accum_ref());
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
int accum_m, accum_n;
if (cutlass::platform::is_same<Element, float>::value) {
// (quad[2],quad[0])+lane_in_quad[0]
accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1);
// (quad[1])+lane_in_quad[1]
accum_n =
((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials +
(lane_in_quad & 2);
} else {
accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 +
lane_in_quad; // (quad[2],quad[0])
accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials;
}
cutlass::MatrixCoord lane_offset(accum_m, accum_n);
// Tile offset
ref_.add_coord_offset(
tile_coords *
cutlass::MatrixCoord(
{IteratorC::Shape::kRow, IteratorC::Shape::kColumn}));
using AccessType = cutlass::Array<scalar_t, EleShapePerPatial::kColumn>;
// store - from MmaVoltaTensorOpAccumulatorTileIterator
CUTLASS_PRAGMA_UNROLL
for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) {
CUTLASS_PRAGMA_UNROLL
for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start =
(((tile_n * Policy::TileIterations::kRow + tile_m) *
Policy::MmaIterations::kColumn +
mma_n) *
Policy::MmaIterations::kRow +
mma_m) *
kElementsPerMma;
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < kAccumulatorPatials; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
int accum_m = tile_m * Policy::InterleavedTile::kRow +
mma_m * QuadShapePerPatialMma::kRow + m * 2;
int accum_n = tile_n * Policy::InterleavedTile::kColumn +
mma_n * QuadShapePerPatialMma::kColumn +
p * Policy::InterleavedTile::kColumn / 2;
int r = (accum_m + lane_offset.row());
AccessType to_store;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
int idx = mma_accum_start + p * kElementsPerPartial +
m * EleShapePerPatial::kColumn + n;
to_store[n] = scalar_t(accum[idx]);
}
int c = (accum_n + lane_offset.column());
assert(r < 32);
assert(c < 32);
*reinterpret_cast<AccessType*>(
ref_.data() + ref_.offset({r, c})) = to_store;
}
}
}
}
}
}
}
static void CUTLASS_DEVICE accumApplyLSEToSmem(
AccumulatorSharedStorage& shared_storage,
typename IteratorC::Fragment& accum,
lse_scalar_t const* lse,
int lse_extent,
int thread_id,
int warp_id,
int lane_id,
cutlass::MatrixCoord const& tile_coords) {
// Non-optimized way to apply LSE to registers
// NOTE: accum is attn.T
// TODO: Optimize for each architecture
static constexpr int WarpSize = 32;
using AccumLambdaIterator =
typename DefaultMmaAccumLambdaIterator<IteratorC, accum_t, WarpSize>::
Iterator;
auto lane_offset =
AccumLambdaIterator::get_lane_offset(lane_id, warp_id, tile_coords);
cutlass::Array<lse_scalar_t, IteratorC::Fragment::kElements> lse_prefetched;
lse_prefetched.clear();
int rowIdx = 0;
int colIdx = 0;
AccumLambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {
++rowIdx;
colIdx = 0;
},
[&](int accum_m, int accum_n, int idx) {
if (rowIdx == 1) {
lse_prefetched[colIdx] = accum_n < lse_extent
? lse[accum_n]
: cutlass::platform::numeric_limits<accum_t>::infinity();
}
accum[idx] = expf(accum[idx] - lse_prefetched[colIdx]);
++colIdx;
},
[&](int accum_m) {});
accumToSmem(shared_storage, accum, lane_id, tile_coords);
}
};
// Simt Specialization
// for f32 on Sm70-Sm75 and f16/f32 below
template <
typename Operator,
typename OperatorPolicy,
typename scalar_t,
typename WarpShape_,
typename ThreadblockShape_>
struct B2bGemm<
cutlass::gemm::warp::MmaSimtTileIterator<
cutlass::MatrixShape<32, 32>,
cutlass::gemm::Operand::kC,
float,
cutlass::layout::RowMajor,
OperatorPolicy,
1,
1>,
Operator,
scalar_t,
WarpShape_,
ThreadblockShape_> {
using IteratorC = cutlass::gemm::warp::MmaSimtTileIterator<
cutlass::MatrixShape<32, 32>,
cutlass::gemm::Operand::kC,
float,
cutlass::layout::RowMajor,
OperatorPolicy,
1,
1>;
using accum_t = typename IteratorC::Element;
using WarpShape = WarpShape_;
using ThreadblockShape = ThreadblockShape_;
using FragmentC = typename IteratorC::Fragment;
using lse_scalar_t = float;
// Storage in shared-memory for Q.Kt
using AccumulatorSharedStorage =
cutlass::gemm::threadblock::AccumulatorSharedStorage<
ThreadblockShape,
scalar_t,
cutlass::layout::ColumnMajor,
cutlass::MatrixShape<0, 0> // Padding
>;
static void CUTLASS_DEVICE accumToSmem(
AccumulatorSharedStorage& shared_storage,
FragmentC const& accum,
int lane_id,
cutlass::MatrixCoord const& tile_coords) {
using Policy = typename IteratorC::Policy;
using Element = typename IteratorC::Element;
using Iterations = typename IteratorC::Iterations;
using Delta = typename IteratorC::Delta;
auto ref_ = shared_storage.accum_ref();
// ctor - MmaSimtTileIterator
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN);
ref_.add_coord_offset(lane_offset);
// Tile offset
ref_.add_coord_offset(
tile_coords *
cutlass::MatrixCoord(
{IteratorC::Shape::kRow, IteratorC::Shape::kColumn}));
// store - MmaSimtTileIterator
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) {
int r =
Policy::LaneMmaShape::kM * (mma_m * Policy::WarpShape::kRow) +
m;
int c = mma_n * Delta::kColumn + n;
int idx = n +
Policy::LaneMmaShape::kN *
(mma_n +
Iterations::kColumn *
(m + mma_m * Policy::LaneMmaShape::kM));
ref_.at({r, c}) = scalar_t(accum[idx]);
}
}
}
}
}
static void CUTLASS_DEVICE accumApplyLSEToSmem(
AccumulatorSharedStorage& shared_storage,
typename IteratorC::Fragment& accum,
lse_scalar_t const* lse,
int lse_extent,
int thread_id,
int warp_id,
int lane_id,
cutlass::MatrixCoord const& tile_coords) {
// Non-optimized way to apply LSE to registers
// NOTE: accum is attn.T
// TODO: Optimize for each architecture
static constexpr int WarpSize = 32;
using AccumLambdaIterator =
typename DefaultMmaAccumLambdaIterator<IteratorC, accum_t, WarpSize>::
Iterator;
auto lane_offset =
AccumLambdaIterator::get_lane_offset(lane_id, warp_id, tile_coords);
cutlass::Array<lse_scalar_t, IteratorC::Fragment::kElements> lse_prefetched;
lse_prefetched.clear();
int rowIdx = 0;
int colIdx = 0;
AccumLambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {
++rowIdx;
colIdx = 0;
},
[&](int accum_m, int accum_n, int idx) {
if (rowIdx == 1) {
lse_prefetched[colIdx] = accum_n < lse_extent
? lse[accum_n]
: cutlass::platform::numeric_limits<accum_t>::infinity();
}
accum[idx] = expf(accum[idx] - lse_prefetched[colIdx]);
++colIdx;
},
[&](int accum_m) {});
accumToSmem(shared_storage, accum, lane_id, tile_coords);
}
};
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/gemm/mma_from_smem.h/0 | {
"file_path": "examples/41_fused_multi_head_attention/gemm/mma_from_smem.h",
"repo_id": "examples",
"token_count": 28628
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Block-Ell sparse gemm example.
This example performs a Sparse-matrix dense-matrix multiplication (SpMM) operation.
Matrix A is stored in the Blocked-Ellpack (Blocked-ELL) storage format.
Details about the Blocked-Ellpack (Blocked-ELL) storage format can be found here:
https://docs.nvidia.com/cuda/cusparse/index.html#cusparse-generic-spmat-create-blockedell
Whereas matrix B is a dense matrix.
Blocked-Ellpack or Blocked-ELL storage format comprises of two matrices.
First is a packed matrix (ellValue matrix) that stores non-zero values in consecutive blocks,
represented by tensor_a in this example. Second is a matrix of indices (ellColInd matrix),
represented by tensor_ell_idx in this example, that represent the column indices of the
corresponding non-zero blocks. All rows in the matrices must have the same number of blocks.
ellColInd can contain -1 values for indicating empty blocks. These matrices store elements in
row-major order.
Description of parameters and tensors used to represent the Blocked-Ellpack (ELL) format
for this example:
a_rows - Rows in the sparse matrix.
      a_cols - Columns in the sparse matrix.
a_ell_blocksize - Size of the ELL-Blocks.
a_ell_num_columns - Number of columns in the Blocked-Ellpack format (ellValue columns)
tensor_a - ellValue matrix, whose size is (a_rows * a_ell_num_columns)
tensor_ell_idx - Blocked-ELL Column indices (ellColInd), whose size is
(a_rows / a_ell_blocksize) * (a_ell_num_columns / a_ell_blocksize)
tensor_b - Input dense matrix whose size is (a_cols * n)
tensor_c/tensor_d - Output dense matrix whose size is (a_rows * n)
{a_rows, n, a_cols} - Problem size
*/
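/*
  A small illustrative sketch of the Blocked-ELL storage described above
  (values chosen arbitrarily for illustration; they are not produced by this
  example). Consider a 4x6 sparse matrix with a_ell_blocksize = 2 in which
  every block-row stores two 2x2 blocks, i.e. a_ell_num_columns = 4:

      ellValue (4 x 4, row-major)      ellColInd (2 x 2, row-major)
      | 1 2 | 5 6 |                    |  0  2 |   block-row 0: column-blocks 0 and 2
      | 3 4 | 7 8 |                    |  1 -1 |   block-row 1: column-block 1, one empty block
      | 9 9 | 0 0 |
      | 9 9 | 0 0 |

  An element (r, c) of ellValue maps back to dense coordinates roughly as

      dense_row = r
      dense_col = ellColInd[r / a_ell_blocksize][c / a_ell_blocksize] * a_ell_blocksize
                  + (c % a_ell_blocksize)

  with blocks whose ellColInd entry is -1 treated as all zeros. This is the
  expansion performed by cutlass::uncompress_ell_block_sparse() in the
  reference check below.
*/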
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <unordered_map>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_grouped.h"
#include "cutlass/gemm/kernel/default_gemm_grouped.h"
#include "cutlass/gemm/device/ell_gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/host_uncompress.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
bool reference_check;
int iterations;
int cuda_streams;
int a_rows, n, a_cols;
int a_ell_num_columns;
int a_ell_blocksize;
int a_base;
float alpha;
float beta;
//
// Methods
//
Options():
help(false),
reference_check(true),
iterations(20),
cuda_streams(0),
a_rows(1024),
n(1024),
a_cols(1024),
a_ell_num_columns(512),
a_ell_blocksize(16),
a_base(0),
alpha(1),
beta()
{ }
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("alpha", alpha, 1.0f);
cmd.get_cmd_line_argument("beta", beta, 0.0f);
cmd.get_cmd_line_argument("iterations", iterations, 20);
cmd.get_cmd_line_argument("streams", cuda_streams, 0);
cmd.get_cmd_line_argument("reference-check", reference_check, true);
cmd.get_cmd_line_argument("a_rows", a_rows, 1024);
cmd.get_cmd_line_argument("n", n, 1024);
cmd.get_cmd_line_argument("a_cols", a_cols, 1024);
cmd.get_cmd_line_argument("a_ell_num_columns", a_ell_num_columns, 512);
cmd.get_cmd_line_argument("a_ell_blocksize", a_ell_blocksize, 16);
cmd.get_cmd_line_argument("a_base", a_base, 0);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "43_ell_block_sparse_gemm\n\n"
<< " This example profiles the performance of a ELL block sparse GEMM kernel.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --a_rows=<int> Sets the number of the rows of the sparse matrix.\n"
<< " --n=<int> Sets the N dimension.\n"
<< " --a_cols=<int> Sets the number of columns of the sparse matrix.\n"
<< " --a_ell_num_columns=<int> Sets the actual number of columns of the Blocked-Ellpack format.\n"
<< " --a_ell_blocksize=<int> Sets the size of the ELL-Block.\n"
<< " --a_base=<int> Sets the base index.\n"
<< " --alpha=<f32> Epilogue scalar alpha (real part)\n"
<< " --beta=<f32> Epilogue scalar beta (real part)\n\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --reference-check=<bool> If true, performs reference check.\n";
out << "\n\nExamples:\n\n"
<< "# Runs a 1024x1024x1024 ELL block sparse GEMM with 16x16 block size and actual 512 non-zero columns in A operand\n"
<< "$ ./examples/43_ell_block_sparse_gemm/43_ell_block_sparse_gemm --a_rows=1024 --n=1024 --a_cols=1024 --a_ell_num_columns=512 --a_ell_blocksize=16\n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = (int64_t)a_rows * (int64_t)a_cols * (int64_t)n;
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Gemm>
class Testbed {
public:
//
// Type definitions
//
using ElementA = typename Gemm::ElementA;
using ElementB = typename Gemm::ElementB;
using ElementC = typename Gemm::ElementC;
using ElementAccumulator = typename Gemm::ElementAccumulator;
using EpilogueOutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp;
using ElementCompute = typename EpilogueOutputOp::ElementCompute;
using LayoutA = typename Gemm::LayoutA;
using LayoutB = typename Gemm::LayoutB;
using LayoutC = typename Gemm::LayoutC;
using MatrixCoord = typename LayoutC::TensorCoord;
private:
//
// Data members
//
Options options;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
cutlass::Distribution::Kind init_ELL;
uint32_t seed;
cutlass::HostTensor<ElementA, LayoutA> tensor_a;
cutlass::HostTensor<ElementB, LayoutB> tensor_b;
cutlass::HostTensor<ElementC, LayoutC> tensor_c;
cutlass::HostTensor<ElementC, LayoutC> tensor_d;
cutlass::HostTensor<ElementA, LayoutA> tensor_a_uncompressed;
cutlass::HostTensor<ElementC, LayoutC> reference_d;
cutlass::HostTensor<int32_t, LayoutA> tensor_ell_idx;
public:
//
// Methods
//
Testbed(
Options const &options_,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_ELL_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
):
options(options_), init_A(init_A_), init_B(init_B_), init_C(init_C_), init_ELL(init_ELL_), seed(seed_) { }
private:
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
void initialize_tensor_(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint32_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) {
scope_max = 5;
scope_min = -5;
}
else {
scope_max = 8;
scope_min = -8;
}
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(
view, seed, Element(), Element(0.5f));
}
else if (dist_kind == cutlass::Distribution::Sequential) {
// Fill with increasing elements
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity(), Element(1), Element());
} else {
// Fill with all 1s
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity(), Element(), Element(1));
}
}
/// Initializes data structures
void initialize_() {
tensor_a.resize(cutlass::make_Coord(options.a_rows, options.a_ell_num_columns));
tensor_b.resize(cutlass::make_Coord(options.a_cols, options.n));
tensor_c.resize(cutlass::make_Coord(options.a_rows, options.n));
tensor_d.resize(cutlass::make_Coord(options.a_rows, options.n));
tensor_a_uncompressed.resize(cutlass::make_Coord(options.a_rows, options.a_cols));
reference_d.resize(cutlass::make_Coord(options.a_rows, options.n));
tensor_ell_idx.resize(cutlass::make_Coord(options.a_rows / options.a_ell_blocksize,
options.a_ell_num_columns / options.a_ell_blocksize));
//
// Initialize the problems of the workspace
//
initialize_tensor_(tensor_a.host_view(), init_A, seed * 2021);
initialize_tensor_(tensor_b.host_view(), init_B, seed * 2022);
initialize_tensor_(tensor_c.host_view(), init_C, seed * 2023);
if (init_ELL == cutlass::Distribution::Uniform) {
cutlass::reference::host::TensorFillRandomEllIdx(
tensor_ell_idx.host_view(), seed,
options.a_rows / options.a_ell_blocksize,
options.a_ell_num_columns / options.a_ell_blocksize,
options.a_cols / options.a_ell_blocksize);
} else {
for(int i = 0; i < options.a_rows / options.a_ell_blocksize; ++i) {
for(int j = 0; j < options.a_ell_num_columns / options.a_ell_blocksize; ++j) {
tensor_ell_idx.at({i, j}) = j+3;
}
}
}
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ell_idx.sync_device();
}
  /// Verifies the result against a host reference GEMM
bool verify_() {
bool passed = true;
tensor_d.sync_host();
cutlass::uncompress_ell_block_sparse(
tensor_a_uncompressed.host_ref(),
tensor_a.host_ref(),
tensor_ell_idx.host_ref(),
options.a_rows,
options.a_cols,
options.a_ell_num_columns,
options.a_ell_blocksize
);
cutlass::reference::host::Gemm<
typename Gemm::ElementA, typename Gemm::LayoutA,
typename Gemm::ElementB, typename Gemm::LayoutB,
typename Gemm::ElementC, typename Gemm::LayoutC,
ElementCompute,
ElementAccumulator, typename Gemm::Operator>
reference_gemm;
reference_gemm(
{options.a_rows, options.n, options.a_cols},
options.alpha,
tensor_a_uncompressed.host_ref(),
tensor_b.host_ref(),
options.beta,
reference_d.host_ref(),
ElementAccumulator(0)
);
// Reference check
passed = cutlass::reference::host::TensorEquals(tensor_d.host_view(), reference_d.host_view());
if (!passed) {
std::cerr << "\n***\nError - problem failed the QA check\n***\n" << std::endl;
std::stringstream fname;
fname << "error_43_ell_block_sparse_gemm"
<< "mnk_"
<< options.a_rows << "x"
<< options.n << "x"
<< options.a_cols << "_"
<< options.a_ell_num_columns << "_"
<< options.a_ell_blocksize << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results
<< "alpha: " << ElementCompute(options.alpha) << "\n"
<< "beta: " << ElementCompute(options.beta) << "\n"
<< "block size: " << options.a_ell_blocksize << "\n"
<< "\nA:\n" << tensor_a.host_view() << "\n"
<< "\nA Ell Index:\n" << tensor_ell_idx.host_view() << "\n"
<< "\nB:\n" << tensor_b.host_view() << "\n"
<< "\nC:\n" << tensor_c.host_view() << "\n"
<< "\nD reference:\n" << reference_d.host_view() << "\n"
<< "\nD computed:\n" << tensor_d.host_view() << "\n";
return passed;
}
return passed;
}
public:
  /// Returns true if the active CUDA device has sufficient resources
  /// (notably shared memory) to run the kernel; otherwise returns false.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes a BlockedEll SpMM kernel and measures runtime.
Result profile() {
Result result;
// Early exit
if (!sufficient()) {
std::cout << "Active CUDA device lacks hardware resources to run CUTLASS BlockedEll SpMM kernel." << std::endl;
return result;
}
result.passed = false;
// Initialize the problem
initialize_();
    // Configure the epilogue parameters
typename EpilogueOutputOp::Params epilogue_op(options.alpha, options.beta);
// Configure GEMM arguments
typename Gemm::Arguments args(
{options.a_rows, options.n, options.a_cols},
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
tensor_ell_idx.device_data(),
options.a_ell_num_columns,
options.a_ell_blocksize,
options.a_base,
epilogue_op
);
// Initialize the GEMM object
Gemm gemm{};
result.status = gemm.initialize(args);
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to initialize CUTLASS BlockedEll SpMM kernel." << std::endl;
return result;
}
// Run the BlockedEll SpMM object
result.status = gemm.run();
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS BlockedEll SpMM kernel." << std::endl;
return result;
}
// Wait for completion
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
//
// Verify correctness
//
result.passed = true;
if (options.reference_check) {
result.passed = verify_();
}
//
// Warm-up run
//
result.status = gemm.run();
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS BlockedEll SpMM kernel." << std::endl;
return result;
}
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        return result;
}
}
// Record an event at the start of a series of GEMM operations
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
gemm();
}
//
// Stop profiling loop
//
// Record an event when the GEMM operations have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
//
// Cleanup
//
for (auto event : events) {
(void)cudaEventDestroy(event);
}
std::cout << std::endl;
std::cout << "ELL Block Sparse GEMM (CUTLASS):\n"
<< "====================================================" << std::endl;
std::cout << std::endl;
std::cout << " " << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " " << " GFLOPs: " << result.gflops << std::endl;
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
//
// This example uses mma.sync to directly access Tensor Cores to achieve peak performance.
//
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) {
//
// This example requires an NVIDIA Ampere-architecture GPU.
//
std::cout
<< "CUTLASS's BlockedEll SpMM example requires a GPU of NVIDIA's Ampere Architecture or "
<< "later (compute capability 80 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
//
// Define the BlockedEll type
//
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementOutput = cutlass::half_t;
using ElementAccumulator = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using LayoutC = cutlass::layout::ColumnMajor;
constexpr int32_t kAlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value;
constexpr int32_t kAlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
constexpr int32_t kStages = 4;
using Gemm = typename cutlass::gemm::device::EllGemm<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementOutput,
LayoutC,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
ThreadblockShape,
WarpShape,
InstructionShape,
cutlass::epilogue::thread::LinearCombination<
ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value,
ElementAccumulator, ElementAccumulator>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
kStages, kAlignmentA, kAlignmentB>;
//
// Profile it
//
Testbed<Gemm> testbed(options);
if (!testbed.sufficient()) {
std::cout << "The active CUDA device lacks sufficient hardware resources to execute this kernel.\n";
return 0;
}
Result result = testbed.profile();
if (!result.passed) {
std::cout << "Profiling CUTLASS ELL block sparse GEMM has failed.\n";
std::cout << "\nFailed\n";
return -1;
}
std::cout << "\nPassed\n";
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/43_ell_block_sparse_gemm/ell_block_sparse_gemm.cu/0 | {
"file_path": "examples/43_ell_block_sparse_gemm/ell_block_sparse_gemm.cu",
"repo_id": "examples",
"token_count": 9447
} | 7 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import gen_ir
import helper
class gen_default_b2b_mma:
    def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "DefaultB2bMma"
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
/* Auto Generated code - Do not edit.*/
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/numeric_types.h\"
#include \"{cutlass_dir}cutlass/arch/arch.h\"
#include \"{cutlass_dir}cutlass/transform/threadblock/predicated_tile_iterator.h\"
#include \"{cutlass_dir}cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm70.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm75.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm80.h\"
#include \"../threadblock/b2b_mma_pipelined.h\"
#include \"../../fixed_impl/epilogue/threadblock/fused_bias_act_epilogue.h\"
#include \"../../fixed_impl/epilogue/threadblock/default_bias_act_epilogue_tensor_op.h\"
#include \"../../fixed_impl/gemm/warp/mma_tensor_op_fragment_iterator_without_output_op.h\"
'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_using_MmaCore(self, stage):
threadBlockShape = "ThreadblockShape"
warpShape = "WarpShape"
        instructionShape = "InstructionShape"
Mma_typename = "typename cutlass::gemm::threadblock::DefaultMmaCore"
gen_code = ""
for i in range(self.b2b_num):
code_using = "using MmaCore" + str(i)
gen_code += code_using + " = " + gen_ir.gen_declare_template_struct(Mma_typename, \
            helper.var_idx(threadBlockShape, i), helper.var_idx(warpShape, i), instructionShape, \
"ElementA", "LayoutA", \
helper.var_idx("ElementB", i), helper.var_idx("LayoutB", i), \
helper.var_idx("ElementAccumulator", i), "layout::RowMajor", \
"OperatorClass", str(stage), "Operator")
return gen_code
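    # For reference, with stage == 2 and b2b_num == 2 the loop above emits
    # using-declarations roughly of the following shape. This is only a sketch:
    # it assumes helper.var_idx("Name", i) appends the index to the name and that
    # gen_ir.gen_declare_template_struct renders "using X = T<arg, ...>;".
    #
    #   using MmaCore0 = typename cutlass::gemm::threadblock::DefaultMmaCore<
    #       ThreadblockShape0, WarpShape0, InstructionShape,
    #       ElementA, LayoutA, ElementB0, LayoutB0,
    #       ElementAccumulator0, layout::RowMajor, OperatorClass, 2, Operator>;
    #   using MmaCore1 = ...;  // same pattern with index 1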
def gen_using_FusedAddBiasEpilogue(self):
gen_code = ""
for i in range(self.b2b_num - 1):
code_using = helper.var_idx("using FusedAddBiasEpilogue", i)
epilogue_name = "typename cutlass::epilogue::threadblock::DefaultFusedBiasActEpilogueTensorOp"
template_args = helper.var_idx("<ThreadblockShape", i) + helper.var_idx(",typename MmaCore", i) + helper.var_idx("::MmaPolicy::Operator, 1, EpilogueOutputOp", i) + ", 2>::Epilogue"
gen_code += code_using + " = " + epilogue_name + template_args + ";\n"
return gen_code
def gen_using_Iterator(self):
code_using = "using IteratorA0"
iterator_typename = "cutlass::transform::threadblock::PredicatedTileIterator"
MmaCore = "MmaCore0"
matrix_shape = "cutlass::MatrixShape<" + MmaCore + "::Shape::kM, " + MmaCore + "::Shape::kK>"
iterator_map = "typename " + MmaCore + "::IteratorThreadMapA"
gen_code = code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, \
matrix_shape, "ElementA", "LayoutA", "1", iterator_map, "AlignmentA_")
for i in range(self.b2b_num):
code_using = "using IteratorB" + str(i)
iterator_typename = "cutlass::transform::threadblock::PredicatedTileIterator"
MmaCore = "MmaCore" + str(i)
matrix_shape = "cutlass::MatrixShape<" + MmaCore + "::Shape::kK, " + MmaCore + "::Shape::kN>"
iterator_map = "typename " + MmaCore + "::IteratorThreadMapB"
gen_code += code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, \
matrix_shape, helper.var_idx("ElementB", i), helper.var_idx("LayoutB", i), "0", iterator_map, "AlignmentB_")
return gen_code
def gen_fragment_iterator(self):
gen_code = "using AccumulatorLayout = cutlass::layout::ColumnMajor;\n"
for i in range(1, self.b2b_num):
code_using = "using FragmentIteratorA" + str(i)
iterator_typename = "cutlass::gemm::warp::MmaTensorOpPureFragmentIterator"
curr_MmaCore = "MmaCore" + str(i)
prev_MmaCore = "MmaCore" + str(i - 1)
Matrix_shape_curr = "cutlass::MatrixShape<" + curr_MmaCore + "::WarpShape::kM, " + curr_MmaCore + "::InstructionShape::kK>"
Matrix_shape_prev = "cutlass::MatrixShape<" + prev_MmaCore + "::WarpShape::kM, " + prev_MmaCore + "::WarpShape::kN>"
Curr_shape_kK = curr_MmaCore + "::Shape::kK"
gen_code += code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, \
Matrix_shape_curr, Matrix_shape_prev, Curr_shape_kK, \
helper.var_idx("ElementAccumulator", i-1), "ElementA", \
"AccumulatorLayout", "InstructionShape_", "true")
return gen_code
def gen_threadblockmma(self):
code_using = "using ThreadblockB2bMma"
iterator_typename = "cutlass::gemm::threadblock::B2bMmaPipelined"
MmaPipelined_param_Mma0_shape = "typename MmaCore0::Shape"
MmaPipelined_param_Mma0_iteratorA = "IteratorA0"
MmaPipelined_param_Mma0_smemIteratorA = "typename MmaCore0::SmemIteratorA"
MmaPipelined_param_Mma0_iteratorB = "IteratorB0"
MmaPipelined_param_Mma0_smemIteratorB = "typename MmaCore0::SmemIteratorB"
MmaPipelined_param_list = MmaPipelined_param_Mma0_shape + ", " + MmaPipelined_param_Mma0_iteratorA + ", " + MmaPipelined_param_Mma0_smemIteratorA + ", " + MmaPipelined_param_Mma0_iteratorB + ", " + MmaPipelined_param_Mma0_smemIteratorB + ", "
for i in range(1, self.b2b_num):
MmaPipelined_param_Mma_shape = "typename MmaCore" + str(i) + "::Shape"
MmaPipelined_param_Mma_iteratorA = "FragmentIteratorA" + str(i)
MmaPipelined_param_Mma_iteratorB = "IteratorB" + str(i)
MmaPipelined_param_Mma_smemIteratorB = "typename MmaCore" + str(i) + "::SmemIteratorB"
MmaPipelined_param_list += MmaPipelined_param_Mma_shape + ", " + MmaPipelined_param_Mma_iteratorA + ", " + MmaPipelined_param_Mma_iteratorB + ", " + MmaPipelined_param_Mma_smemIteratorB + ", "
MmaPipelined_param_list += "ElementAccumulator0, layout::RowMajor, "
for i in range(self.b2b_num - 1):
epilogue_name = "EpilogueOutputOp" + str(i)
MmaPipelined_param_list += epilogue_name + ", "
for i in range(self.b2b_num - 1):
epilogue_name = "FusedAddBiasEpilogue" + str(i)
MmaPipelined_param_list += epilogue_name + ", "
for i in range(self.b2b_num):
MmaPolicy = "typename MmaCore" + str(i) + "::MmaPolicy"
MmaPipelined_param_list += MmaPolicy + ", "
cnt = 0
for i in range(self.b2b_num):
MmaStage = helper.var_idx("Stages", i)
final = ", "
if cnt == self.b2b_num - 1:
final = ""
MmaPipelined_param_list += MmaStage + final
cnt += 1
gen_code = code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, MmaPipelined_param_list)
return gen_code
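    # For b2b_num == 2 the parameter list assembled above corresponds roughly to
    # an instantiation of the following shape (sketch only; the exact rendering
    # comes from gen_ir.gen_declare_template_struct):
    #
    #   using ThreadblockB2bMma = cutlass::gemm::threadblock::B2bMmaPipelined<
    #       typename MmaCore0::Shape, IteratorA0, typename MmaCore0::SmemIteratorA,
    #       IteratorB0, typename MmaCore0::SmemIteratorB,
    #       typename MmaCore1::Shape, FragmentIteratorA1,
    #       IteratorB1, typename MmaCore1::SmemIteratorB,
    #       ElementAccumulator0, layout::RowMajor,
    #       EpilogueOutputOp0, FusedAddBiasEpilogue0,
    #       typename MmaCore0::MmaPolicy, typename MmaCore1::MmaPolicy,
    #       Stages0, Stages1>;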
def gen_code(self):
# Generate default template struct
gen_code = gen_ir.gen_template_struct(self.gen_class_name, self.template_param, "", speicalized = None, set_default=False)
# Generate specialized template struct
mmacore_codebody = self.gen_using_MmaCore(2)
iterator_codebody = self.gen_using_Iterator()
fragment_iterator_codebody = self.gen_fragment_iterator()
epilogue_iterator_codebody = self.gen_using_FusedAddBiasEpilogue()
threadBlockMma = self.gen_threadblockmma()
specialized_code = mmacore_codebody + iterator_codebody + fragment_iterator_codebody + epilogue_iterator_codebody + threadBlockMma
        # Specialize for the case LayoutD == cutlass::layout::RowMajor
rtn_template_args, speicalized_template_args = gen_ir.filtered_param(self.template_param, [ ('LayoutD', "cutlass::layout::RowMajor")], keep_= True)
gen_speical_code = gen_ir.gen_template_struct(self.gen_class_name, rtn_template_args, specialized_code, speicalized = speicalized_template_args, set_default=False)
code = gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("threadblock", gen_code + gen_speical_code)))
return self.gen_include_header() + code
class gen_b2b_mme_pipelined:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "B2bMmaPipelined"
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/array.h\"
#include \"{cutlass_dir}cutlass/aligned_buffer.h\"
#include \"{cutlass_dir}cutlass/numeric_conversion.h\"
#include \"{cutlass_dir}cutlass/numeric_types.h\"
#include \"{cutlass_dir}cutlass/matrix_shape.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h\"
#include \"../threadblock/b2b_mma_base.h\"\n'''.format(cutlass_dir = self.cutlass_deps_root)
return code
def gen_using(self):
code_using = "using FragmentA0 = typename IteratorA0::Fragment;\n"
code_using += "using Base = B2bMmaBase<"
for i in range(self.b2b_num):
code_using += helper.var_idx("Shape", i) + "_, "
for i in range(self.b2b_num):
code_using += helper.var_idx("Policy", i) + "_, "
for i in range(self.b2b_num):
code_using += helper.var_idx("Stage", i) + "_, "
code_using = code_using[: -2] + ">;\n"
for i in range(self.b2b_num):
code_using += helper.var_idx("using FragmentB", i) + helper.var_idx(" = typename IteratorB", i) + "::Fragment;\n"
code_using += helper.var_idx("using FragmentC", i) + helper.var_idx(" = typename Policy", i) + "::Operator::FragmentC;\n"
code_using += helper.var_idx("using Operator", i) + helper.var_idx(" = typename Policy", i) + "::Operator;\n"
for i in range(self.b2b_num - 1):
code_using += helper.var_idx("using IteratorC", i) + helper.var_idx(" = typename FusedAddBiasEpilogue", i) + "::OutputTileIterator;\n"
code_using += "using ArchTag = typename Policy0::Operator::ArchTag;\n"
code_using += "static ComplexTransform const kTransformA0 = Operator0::kTransformA;\n"
for i in range(self.b2b_num):
code_using += helper.var_idx("static ComplexTransform const kTransformB", i) + helper.var_idx(" = Operator", i) + "::kTransformB;\n"
code_using += "private:\n"
code_using += "using WarpFragmentA0 = typename Operator0::FragmentA;\n"
code_using += "using WarpFragmentB0 = typename Operator0::FragmentB;\n"
for i in range(1, self.b2b_num):
code_using += helper.var_idx("using WarpFragmentA", i) + helper.var_idx(" = typename FragmentIteratorA", i) + "::Fragment;\n"
code_using += helper.var_idx("using WarpFragmentB", i) + helper.var_idx(" = typename Operator", i) + "::FragmentB;\n"
code_using += "protected:\n"
code_using += "SmemIteratorA0 smem_iterator_A_;\n"
for i in range(self.b2b_num):
code_using += helper.var_idx("SmemIteratorB", i) + helper.var_idx(" smem_iterator_B", i) + "_;\n"
return code_using
def gen_operator(self, first_use_1stage = False):
code = ""
def gen_operator_param(b2b_num):
param_code = ""
param_code += "int gemm_k_iterations_0,\n"
param_code += helper.var_idx("FragmentC", b2b_num-1) + helper.var_idx(" &accum", b2b_num-1) + ",\n"
param_code += "IteratorA0 iterator_A,\n"
for i in range(b2b_num):
param_code += helper.var_idx("IteratorB", i) + " " + helper.var_idx("iterator_B", i) + ",\n"
param_code += "FragmentC0 const &src_accum, \n"
for i in range(b2b_num - 1):
param_code += helper.var_idx("OutputOp", i) + " " + helper.var_idx("output_op_", i) + ",\n"
for i in range(b2b_num - 1):
param_code += helper.var_idx("FusedAddBiasEpilogue", i) + " " + helper.var_idx("epilogue_", i) + ",\n"
for i in range(b2b_num - 1):
param_code += helper.var_idx("IteratorC", i) + " " + helper.var_idx("iterator_C", i) + ",\n"
param_code += "TransformA0 transform_A0 = TransformA0(), \n"
for i in range(b2b_num):
final = "(),\n"
if i == b2b_num - 1:
final = "()\n"
param_code += helper.var_idx("TransformB", i) + " " + helper.var_idx("transform_B", i) + " = " +helper.var_idx("TransformB", i) + final
return param_code
def gen_first_gemm_1stage(b2b_num):
accu_code = " FragmentC0 accum0 = src_accum;\n"
if b2b_num == 1:
accu_code = " accum0 = src_accum;\n"
code ="\
\n\
FragmentA0 tb_frag_A;\n\
FragmentB0 tb_frag_B0;\n\
\n\
int smem_write_stage_idx = 1;\n\
\n\
tb_frag_A.clear();\n\
tb_frag_B0.clear();\n\
\n\
// The last kblock is loaded in the prolog\n\
iterator_A.load(tb_frag_A);\n\
iterator_B0.load(tb_frag_B0);\n\
\n\
++iterator_A;\n\
++iterator_B0;\n\
\n\
WarpFragmentA0 warp_frag_A0;\n\
WarpFragmentB0 warp_frag_B0;\n\
\n\
Operator0 warp_mma0;\n\
\n\
// Avoid reading out of bounds\n\
if (gemm_k_iterations_0 <= 1) {\n\
iterator_A.clear_mask();\n\
iterator_B0.clear_mask();\n\
}\n\
\n\
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing \n\
// shared memory loads (which have the tightest latency requirement).\n\
\n\
//\n\
// Mainloop\n\
//\n\
\n\
// Note: The main loop does not support Base::WarpGemmIterations == 2.\n\
CUTLASS_GEMM_LOOP\n\
for (; gemm_k_iterations_0 > 0; --gemm_k_iterations_0) {\n\
\n\
this->smem_iterator_A_.store(tb_frag_A);\n\
this->smem_iterator_B0_.store(tb_frag_B0);\n\
\n\
__syncthreads();\n\
//\n\
// Loop over GEMM K dimension\n\
//\n\
\n\
CUTLASS_PRAGMA_UNROLL\n\
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) {\n\
\n\
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group\n\
// as the case may be.\n\
\n\
this->warp_tile_iterator_A0_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations0);\n\
this->warp_tile_iterator_B0_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations0);\n\
\n\
this->warp_tile_iterator_A0_.load(warp_frag_A0);\n\
this->warp_tile_iterator_B0_.load(warp_frag_B0);\n\
\n\
++this->warp_tile_iterator_A0_;\n\
++this->warp_tile_iterator_B0_;\n\
\n\
warp_mma0(accum0, warp_frag_A0, warp_frag_B0, accum0);\n\
}\n\
this->warp_tile_iterator_A0_.add_tile_offset({0, -Policy0::kPartitionsK * Base::kWarpGemmIterations0});\n\
this->warp_tile_iterator_B0_.add_tile_offset({-Policy0::kPartitionsK * Base::kWarpGemmIterations0, 0});\n\
\n\
__syncthreads();\n\
iterator_A.load(tb_frag_A);\n\
iterator_B0.load(tb_frag_B0);\n\
\n\
++iterator_A;\n\
++iterator_B0;\n\
\n\
if(gemm_k_iterations_0 <= 2) {\n\
iterator_A.clear_mask();\n\
iterator_B0.clear_mask();\n\
}\n\
}\n"
return accu_code + code
def gen_first_gemm_2stage(b2b_num):
accu_code = " FragmentC0 accum0 = src_accum;\n"
if b2b_num == 1:
accu_code = " accum0 = src_accum;\n"
code ="\
\n\
FragmentA0 tb_frag_A;\n\
FragmentB0 tb_frag_B0;\n\
\n\
tb_frag_A.clear();\n\
tb_frag_B0.clear();\n\
\n\
// The last kblock is loaded in the prolog\n\
iterator_A.load(tb_frag_A);\n\
iterator_B0.load(tb_frag_B0);\n\
\n\
++iterator_A;\n\
++iterator_B0;\n\
\n\
this->smem_iterator_A_.store(tb_frag_A);\n\
this->smem_iterator_B0_.store(tb_frag_B0);\n\
\n\
++this->smem_iterator_A_;\n\
++this->smem_iterator_B0_;\n\
\n\
__syncthreads();\n\
\n\
// Pair of fragments used to overlap shared memory loads and math instructions\n\
WarpFragmentA0 warp_frag_A0[2];\n\
WarpFragmentB0 warp_frag_B0[2];\n\
\n\
this->warp_tile_iterator_A0_.set_kgroup_index(0);\n\
this->warp_tile_iterator_B0_.set_kgroup_index(0);\n\
\n\
this->warp_tile_iterator_A0_.load(warp_frag_A0[0]);\n\
this->warp_tile_iterator_B0_.load(warp_frag_B0[0]);\n\
\n\
++this->warp_tile_iterator_A0_;\n\
++this->warp_tile_iterator_B0_;\n\
\n\
Operator0 warp_mma0;\n\
\n\
int smem_write_stage_idx = 1;\n\
\n\
// Avoid reading out of bounds\n\
if (gemm_k_iterations_0 <= 1) {\n\
iterator_A.clear_mask();\n\
iterator_B0.clear_mask();\n\
}\n\
\n\
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing \n\
// shared memory loads (which have the tightest latency requirement).\n\
iterator_A.load(tb_frag_A);\n\
\n\
//\n\
// Mainloop\n\
//\n\
\n\
// Note: The main loop does not support Base::WarpGemmIterations == 2.\n\
CUTLASS_GEMM_LOOP\n\
for (; gemm_k_iterations_0 > 0; --gemm_k_iterations_0) {\n\
\n\
//\n\
// Loop over GEMM K dimension\n\
//\n\
\n\
CUTLASS_PRAGMA_UNROLL\n\
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) {\n\
\n\
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group\n\
// as the case may be.\n\
\n\
if (warp_mma_k == Base::kWarpGemmIterations0 - 1) {\n\
\n\
// Write fragments to shared memory\n\
this->smem_iterator_A_.store(tb_frag_A);\n\
\n\
this->smem_iterator_B0_.store(tb_frag_B0);\n\
\n\
__syncthreads();\n\
\n\
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing \n\
// shared memory loads (which have the tightest latency requirement).\n\
iterator_A.load(tb_frag_A);\n\
\n\
++this->smem_iterator_B0_;\n\
++this->smem_iterator_A_;\n\
\n\
\n\
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory\n\
if (smem_write_stage_idx == 1) {\n\
this->smem_iterator_A_.add_tile_offset({0, -Base::Stage0});\n\
this->smem_iterator_B0_.add_tile_offset({-Base::Stage0, 0});\n\
}\n\
else {\n\
this->warp_tile_iterator_A0_.add_tile_offset(\n\
{0, -Base::Stage0 * Policy0::kPartitionsK * Base::kWarpGemmIterations0});\n\
this->warp_tile_iterator_B0_.add_tile_offset(\n\
{-Base::Stage0 * Policy0::kPartitionsK * Base::kWarpGemmIterations0,\n\
0});\n\
}\n\
\n\
smem_write_stage_idx ^= 1;\n\
}\n\
\n\
this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);\n\
this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);\n\
\n\
this->warp_tile_iterator_A0_.load(warp_frag_A0[(warp_mma_k + 1) % 2]);\n\
this->warp_tile_iterator_B0_.load(warp_frag_B0[(warp_mma_k + 1) % 2]);\n\
\n\
++this->warp_tile_iterator_A0_;\n\
++this->warp_tile_iterator_B0_;\n\
\n\
if (warp_mma_k == 0) {\n\
\n\
iterator_B0.load(tb_frag_B0);\n\
\n\
++iterator_A;\n\
++iterator_B0;\n\
\n\
// Avoid reading out of bounds if this was the last loop iteration\n\
if (gemm_k_iterations_0 <= 2) {\n\
iterator_A.clear_mask();\n\
iterator_B0.clear_mask();\n\
}\n\
}\n\
\n\
warp_mma0(accum0, warp_frag_A0[warp_mma_k % 2], warp_frag_B0[warp_mma_k % 2], accum0);\n\
}\n\
}\n"
return accu_code + code
def gen_other_gemms_2stage(b2b_num):
code = ""
def gemm_teamplate(id):
code = "// " + str(id + 1) + " Gemm"
code += " /// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile\n"
code += " " + helper.var_idx("FragmentC", id - 1) + helper.var_idx(" after_epilogue_accu", id - 1) + ";\n"
code += " " + helper.var_idx("epilogue_", id - 1) + helper.var_idx("(output_op_", id - 1) + helper.var_idx(", accum", id - 1) \
+ helper.var_idx(", after_epilogue_accu", id - 1) + helper.var_idx(", iterator_C", id - 1) +");\n"
# FragmentIteratorA1 warp_tile_iterator_A1_(accum0);
code += " " + helper.var_idx("FragmentIteratorA", id) + helper.var_idx(" warp_tile_iterator_A", id) +"_(" + helper.var_idx("after_epilogue_accu", id - 1) + ");\n"
# FragmentB1 tb_frag_B1;
code += " " + helper.var_idx("FragmentB", id) + " " + helper.var_idx("tb_frag_B", id) + ";\n"
# tb_frag_B1.clear();
code += " " + helper.var_idx("tb_frag_B", id) + ".clear();\n"
# iterator_B1.load(tb_frag_B1);
code += " " + helper.var_idx("iterator_B", id) + ".load(" + helper.var_idx("tb_frag_B", id) + ");\n"
# ++iterator_B1;
code += " " + "++" + helper.var_idx("iterator_B", id) + ";\n"
# this->smem_iterator_B1_.store(tb_frag_B1);
code += " " + helper.var_idx("this->smem_iterator_B", id) + "_.store(" + helper.var_idx("tb_frag_B", id) + ");\n"
# ++this->smem_iterator_B1_;
code += " " + helper.var_idx("++this->smem_iterator_B", id) + "_;\n"
# __syncthreads();
code += " " + "__syncthreads();\n"
# WarpFragmentA1 warp_frag_A1[2];
code += " " + helper.var_idx("WarpFragmentA", id) + helper.var_idx(" warp_frag_A", id) + "[2];\n"
# WarpFragmentB1 warp_frag_B1[2];
code += " " + helper.var_idx("WarpFragmentB", id) + helper.var_idx(" warp_frag_B", id) + "[2];\n"
# this->warp_tile_iterator_B1_.set_kgroup_index(0);
code += " " + helper.var_idx("this->warp_tile_iterator_B", id) + "_.set_kgroup_index(0);\n"
# warp_tile_iterator_A1_.load(warp_frag_A1[0], output_op_0);
code += " " + helper.var_idx("warp_tile_iterator_A", id) + helper.var_idx("_.load(warp_frag_A", id) + "[0]);\n"
# this->warp_tile_iterator_B1_.load(warp_frag_B1[0]);
code += " " + helper.var_idx("this->warp_tile_iterator_B", id) + helper.var_idx("_.load(warp_frag_B", id) + "[0]);\n"
# ++warp_tile_iterator_A1_;
code += " " + helper.var_idx("++warp_tile_iterator_A", id) + "_;\n"
# ++this->warp_tile_iterator_B1_;
code += " " + helper.var_idx("++this->warp_tile_iterator_B", id) + "_;\n"
# Operator1 warp_mma1;
code += " " + helper.var_idx("Operator", id) + " " + helper.var_idx("warp_mma", id) + ";\n"
# smem_write_stage_idx = 1;
code += " " + "smem_write_stage_idx = 1;\n"
# int gemm_k_iterations_1 = FragmentIteratorA1::Policy::kIterations / Base::kWarpGemmIterations1;
code += " " + helper.var_idx("int gemm_k_iterations_", id) + " = " + helper.var_idx("FragmentIteratorA", id) + helper.var_idx("::Policy::kIterations / Base::kWarpGemmIterations", id) +";\n"
# if (gemm_k_iterations_1 <= 1) {
# iterator_B1.clear_mask();
# }
code += " " + "if (" + helper.var_idx("gemm_k_iterations_", id) + " <= 1 ){\n" \
+ " " + " " + helper.var_idx("iterator_B", id) + ".clear_mask();\n" \
+ " " +"}\n"
# CUTLASS_PRAGMA_UNROLL
code += " " + "CUTLASS_PRAGMA_UNROLL\n"
# for (; gemm_k_iterations_1 > 0; --gemm_k_iterations_1) {
code += " " + helper.var_idx("for (; gemm_k_iterations_", id) + helper.var_idx(" > 0; --gemm_k_iterations_", id) + ") {\n"
# CUTLASS_PRAGMA_UNROLL
code += " " + " " + "CUTLASS_PRAGMA_UNROLL\n"
# for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) {
code += " " + " " + helper.var_idx("for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations", id) + "; ++warp_mma_k) {\n"
# if (warp_mma_k == Base::kWarpGemmIterations1 - 1) {
code += " " + " " + " " + helper.var_idx("if (warp_mma_k == Base::kWarpGemmIterations", id) + " - 1) {\n"
# this->smem_iterator_B1_.store(tb_frag_B1);
code += " " + " " + " " + " " + helper.var_idx(" this->smem_iterator_B", id) + helper.var_idx("_.store(tb_frag_B", id) + ");\n"
# __syncthreads();
code += " " + " " + " " + " " + "__syncthreads();\n"
# ++smem_iterator_B1_;
code += " " + " " + " " + " " + helper.var_idx(" ++smem_iterator_B", id) + "_;\n"
# if (smem_write_stage_idx == 1) {
# smem_iterator_B1_.add_tile_offset({-Base::Stage, 0});
# }
code += " " + " " + " " + " " + "if ( smem_write_stage_idx == 1 ) {\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("smem_iterator_B", id) + helper.var_idx("_.add_tile_offset({-Base::Stage", i) + ", 0});\n" \
+ " " + " " + " " + " " +"}\n"
# else {
# this->warp_tile_iterator_B1_.add_tile_offset(
# {-Base::Stage * Policy1::kPartitionsK *
# Base::kWarpGemmIterations1,
# 0});
# }
code += " " + " " + " " + " " + "else {\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("this->warp_tile_iterator_B", id) + "_.add_tile_offset(\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("{-Base::Stage", id) + helper.var_idx(" * Policy", id) + "::kPartitionsK *\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("Base::kWarpGemmIterations", id) + ",\n" \
+ " " + " " + " " + " " + " " + "0});\n" \
+ " " + " " + " " + " " + "}\n"
# smem_write_stage_idx ^= 1;
# }
code += " " + " " + " " + " " + "smem_write_stage_idx ^= 1;\n" \
+ " " + " " + " " + "}\n"
# this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1);
code += " " + " " + " " + helper.var_idx("this->warp_tile_iterator_B", id) + helper.var_idx("_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations", id) + ");\n"
# warp_tile_iterator_A1_.load(warp_frag_A1[(warp_mma_k + 1) % 2], output_op_0);
code += " " + " " + " " + helper.var_idx("warp_tile_iterator_A", id) + helper.var_idx("_.load(warp_frag_A", id) + "[(warp_mma_k + 1) % 2]);\n"
# this->warp_tile_iterator_B1_.load(warp_frag_B1[(warp_mma_k + 1) % 2]);
code += " " + " " + " " + helper.var_idx("this->warp_tile_iterator_B", id) + helper.var_idx("_.load(warp_frag_B", id) + "[(warp_mma_k + 1) % 2]);\n"
# ++warp_tile_iterator_A1_;
code += " " + " " + " " + helper.var_idx("++warp_tile_iterator_A", id) + "_;\n"
# ++this->warp_tile_iterator_B1_;
code += " " + " " + " " + helper.var_idx("++this->warp_tile_iterator_B", id) + "_;\n"
# if (warp_mma_k == 0) {
# iterator_B1.load(tb_frag_B1);
# ++iterator_B1;
# if (gemm_k_iterations_1 <= 2) {
# iterator_B1.clear_mask();
# }
# }
code += " " + " " + " " + " if (warp_mma_k == 0) {\n" \
+ " " + " " + " " + " " + helper.var_idx("iterator_B", id) + helper.var_idx(".load(tb_frag_B", id) + ");\n" \
+ " " + " " + " " + " " + helper.var_idx("++iterator_B", id) +";\n" \
+ " " + " " + " " + " " + helper.var_idx("if (gemm_k_iterations_", id) +" <= 2) {\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("iterator_B", id) + ".clear_mask();\n" \
+ " " + " " + " " + " " + "}\n" \
+ " " + " " + " " + "}\n"
# warp_mma1(accum, warp_frag_A1[warp_mma_k % 2], warp_frag_B1[warp_mma_k % 2], accum);
# }
# }
code += " " + " " + " " + helper.var_idx("warp_mma", id) + helper.var_idx("(accum", id) + helper.var_idx(", warp_frag_A", id) + helper.var_idx("[warp_mma_k % 2], warp_frag_B", id) + helper.var_idx("[warp_mma_k % 2], accum", id) + ");\n" \
+ " " + " " + "}\n" \
+ " " + "}\n\n\n"
return code
for i in range(1, b2b_num):
clear_accu = ""
if i != b2b_num - 1:
clear_accu = " " + helper.var_idx("FragmentC", i) + helper.var_idx(" accum", i) +";\n"
clear_accu += " " + helper.var_idx("accum", i) +".clear();\n"
code += clear_accu + gemm_template(i)
return code
operator_code = " CUTLASS_DEVICE\n\
void operator()(\n " + gen_operator_param(self.b2b_num) + ") {\n"
if first_use_1stage:
operator_code += gen_first_gemm_1stage(self.b2b_num)
else:
operator_code += gen_first_gemm_2stage(self.b2b_num)
operator_code += gen_other_gemms_2stage(self.b2b_num) + "}\n"
return operator_code
def gen_construct_func(self):
name = self.gen_class_name
func_code = "CUTLASS_DEVICE\n"
func_code += name + "(\n" \
+ " " + "typename Base::B2bMmaSharedStorage &shared_storage,\n" \
+ " " + "int thread_idx,\n" \
+ " " + "int warp_idx,\n" \
+ " " + "int lane_idx\n" \
+ "):\n"
func_code += " " + "Base(shared_storage, thread_idx, warp_idx, lane_idx),\n" \
+ " " + "smem_iterator_A_(shared_storage.sharedStorage0.operand_A_ref(), thread_idx),\n"
for i in range(self.b2b_num):
final = ",\n"
if i == self.b2b_num - 1:
final = " {\n"
func_code += helper.var_idx("smem_iterator_B", i) + helper.var_idx("_(shared_storage.sharedStorage", i) +".operand_B_ref(), thread_idx)" + final
func_code += " " + "int warp_idx_mn = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN);\n"
func_code += " " + "int warp_idx_k = warp_idx / (Base::WarpCount0::kM * Base::WarpCount0::kN);\n"
func_code += " " + "int warp_idx_m = warp_idx_mn % Base::WarpCount0::kM;\n"
func_code += " " + "int warp_idx_n = warp_idx_mn / Base::WarpCount0::kM;\n"
for i in range(self.b2b_num):
func_code += " " + helper.var_idx("int tile_offset_k", i) + helper.var_idx(" = Base::kWarpGemmIterations", i) + " * warp_idx_k;\n"
func_code += " " + "this->warp_tile_iterator_A0_.add_tile_offset({warp_idx_m, tile_offset_k0});\n"
for i in range(self.b2b_num):
func_code += " " + helper.var_idx("this->warp_tile_iterator_B", i) + helper.var_idx("_.add_tile_offset({tile_offset_k", i) + ", warp_idx_n});\n"
func_code += "}\n"
return func_code
def gen_member_func(self, first_use_1stage):
code = "public:\n"
code += self.gen_operator(first_use_1stage)
code += self.gen_construct_func()
return code
def gen_code(self, first_use_1stage):
def gen_template_args(b2b_num):
template_param = []
template_param.append(("typename", "Shape0"))
template_param.append(("typename", "IteratorA0"))
template_param.append(("typename", "SmemIteratorA0"))
template_param.append(("typename", "IteratorB0"))
template_param.append(("typename", "SmemIteratorB0"))
for i in range(1, b2b_num):
template_param.append(("typename", helper.var_idx("Shape", i)))
template_param.append(("typename", helper.var_idx("FragmentIteratorA", i)))
template_param.append(("typename", helper.var_idx("IteratorB", i)))
template_param.append(("typename", helper.var_idx("SmemIteratorB", i)))
template_param.append(("typename", "ElementC"))
template_param.append(("typename", "LayoutC"))
for i in range(0, b2b_num - 1):
template_param.append(("typename", helper.var_idx("OutputOp", i)))
for i in range(0, b2b_num - 1):
template_param.append(("typename", helper.var_idx("FusedAddBiasEpilogue", i)))
for i in range(0, b2b_num):
template_param.append(("typename", helper.var_idx("Policy", i)))
for i in range(0, b2b_num):
template_param.append((int, helper.var_idx("Stage", i)))
template_param.append(("typename","TransformA0", "NumericArrayConverter<typename SmemIteratorA0_::Element, typename IteratorA0_::Element, IteratorA0_::Fragment::kElements>"))
for i in range(0, b2b_num):
cvtr = helper.var_idx("NumericArrayConverter<typename SmemIteratorB", i) + helper.var_idx("_::Element, typename IteratorB", i) + helper.var_idx("_::Element, IteratorB", i) + "_::Fragment::kElements>"
template_param.append(("typename", helper.var_idx("TransformB", i), cvtr))
template_param.append(("typename", "Enable", "bool"))
return template_param
template_param = gen_template_args(self.b2b_num)
inheritance_code = "public B2bMmaBase<"
for i in range(self.b2b_num):
inheritance_code += helper.var_idx("Shape", i) + "_, "
for i in range(self.b2b_num):
inheritance_code += helper.var_idx("Policy", i) + "_, "
for i in range(self.b2b_num - 1):
inheritance_code += helper.var_idx("Stage", i) + "_, "
inheritance_code += helper.var_idx("Stage", self.b2b_num - 1) + "_"
inheritance_code += ">"
code_body = ""
using_code= self.gen_using()
func_code = self.gen_member_func(first_use_1stage)
code_body = using_code + func_code
class_code = gen_ir.gen_template_class(self.gen_class_name, template_param, code_body, inheritance_code = inheritance_code)
code = self.gen_include_header()
code += gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("threadblock", class_code)))
# print(code)
return code
class gen_b2b_mma_base:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = gen_class_name
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dirs}cutlass/aligned_buffer.h\"
#include \"{cutlass_dirs}cutlass/arch/memory.h\"
#include \"{cutlass_dirs}cutlass/array.h\"
#include \"{cutlass_dirs}cutlass/cutlass.h\"
#include \"{cutlass_dirs}cutlass/gemm/gemm.h\"
#include \"{cutlass_dirs}cutlass/matrix_shape.h\"
#include \"{cutlass_dirs}cutlass/numeric_types.h\"\n'''.format(cutlass_dirs=self.cutlass_deps_root)
return code
def gen_shared_storage(self):
code = \
" template< \n\
typename Shape_,\n\
typename Policy_,\n\
int ThisStage_\n\
>\n\
class SharedStorage {\n\
public:\n\
using Shape = Shape_;\n\
using Policy = Policy_;\n\
static int const ThisStage = ThisStage_;\n\
using Operator = typename Policy::Operator;\n\
\
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;\n\
\
/// Tensor reference to the B operand \n\
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;\n\
\n\
/// Shape of the A matrix operand in shared memory \n\
using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,\n\
Shape::kK * ThisStage +\n\
Policy::SmemPaddingA::kColumn>;\n\
\n\
/// Shape of the B matrix operand in shared memory\n\
using ShapeB =\n\
MatrixShape<Shape::kK * ThisStage + Policy::SmemPaddingB::kRow,\n\
Shape::kN + Policy::SmemPaddingB::kColumn>;\n\
\n\
public:\n\
\n\
/// Buffer for A operand\n\
AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;\n\
\n\
/// Buffer for B operand\n\
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;\n\
\n\
public:\n\
\n\
/// Returns a layout object for the A matrix\n\
CUTLASS_DEVICE\n\
static typename Operator::LayoutA LayoutA() {\n\
return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});\n\
}\n\
\n\
/// Returns a layout object for the B matrix\n\
CUTLASS_HOST_DEVICE\n\
static typename Operator::LayoutB LayoutB() {\n\
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});\n\
}\n\
\n\
/// Returns a TensorRef to the A operand\n\
CUTLASS_HOST_DEVICE\n\
TensorRefA operand_A_ref() {\n\
return TensorRefA{operand_A.data(), LayoutA()};\n\
}\n\
\n\
/// Returns a TensorRef to the B operand\n\
CUTLASS_HOST_DEVICE\n\
TensorRefB operand_B_ref() {\n\
return TensorRefB{operand_B.data(), LayoutB()};\n\
}\n\
CUTLASS_HOST_DEVICE\n\
void * get_B_Shared_ptr() {\n\
return operand_B.data();\n\
}\n\
};\n"
return code
def gen_using_and_misc(self, b2b_num):
code_using = ""
for i in range(b2b_num):
code_using += "using Operator" +str(i) + " = typename Policy" + str(i) +"::Operator;\n"
for i in range(b2b_num):
code_using += "using WarpGemm" +str(i) + " = typename Policy" + str(i) +"::Operator::Shape;\n"
for i in range(b2b_num):
code_using += "using WarpCount" +str(i) + " = GemmShape<" + helper.var_idx("Shape", i) +"::kM / " + helper.var_idx("WarpGemm", i) +"::kM, "\
+ helper.var_idx("Shape", i) +"::kN / " + helper.var_idx("WarpGemm", i) +"::kN, "\
+ helper.var_idx("Shape", i) +"::kK / " + helper.var_idx("WarpGemm", i) +"::kK>;\n"
code_misc = ""
for i in range(b2b_num):
code_misc += "static int const " + helper.var_idx("kWarpGemmIterations", i) + " = (" + helper.var_idx("WarpGemm", i) + "::kK / " + helper.var_idx("Operator", i) +"::Policy::MmaShape::kK);\n"
code = code_using + code_misc + self.gen_shared_storage()
for i in range(b2b_num):
code += "using " + helper.var_idx("SharedStorage", i) + " = SharedStorage<" + helper.var_idx("Shape", i) + ", " + helper.var_idx("Policy", i) +", " + helper.var_idx("Stage", i) + ">;\n"
def gen_union_shared_storage(b2b_num):
code = ""
for i in range(b2b_num):
code += " " +helper.var_idx("SharedStorage", i) + " " + helper.var_idx("sharedStorage", i) +";\n"
return code
code += "union B2bMmaSharedStorage {\n" + gen_union_shared_storage(self.b2b_num) + "};\n"
for i in range(b2b_num - 1):
code += helper.var_idx("void * C", i) + "_smm_ptr;\n"
return code
def gen_protected(self):
code = "\nprotected:\n"
code += "typename Operator0::IteratorA warp_tile_iterator_A0_;\n"
for i in range(self.b2b_num):
code += "typename Operator" +str(i) + "::IteratorB" +" warp_tile_iterator_B" + str(i) + "_;\n"
return code
def gen_public_member(self):
code = "\npublic:\n"
code += "CUTLASS_DEVICE\n"
code += \
"B2bMmaBase(\n" + \
" B2bMmaSharedStorage & shared_storage,\n" + \
" int thread_idx,\n" + \
" int warp_idx,\n" + \
" int lane_idx\n" + \
"):\n" + \
" warp_tile_iterator_A0_(shared_storage.sharedStorage0.operand_A_ref(), lane_idx),\n"
for i in range(self.b2b_num):
final = ",\n"
if i == self.b2b_num-1:
final = "\n"
iterator = " warp_tile_iterator_B" + str(i) + "_"
shared_storage = "shared_storage.sharedStorage" + str(i) + ".operand_B_ref()"
code += iterator + "(" + shared_storage + ", lane_idx)" + final
code += "{\n"
for i in range(self.b2b_num - 1):
code += helper.var_idx(" C", i) + helper.var_idx("_smm_ptr = shared_storage.sharedStorage", i) + ".get_B_Shared_ptr();\n"
code += "}\n"
return code
def gen_code(self):
template_arg = []
for i in range(self.b2b_num):
template_arg.append(("typename", helper.var_idx("Shape", i)))
for i in range(self.b2b_num):
template_arg.append(("typename", helper.var_idx("Policy", i)))
for i in range(self.b2b_num):
template_arg.append((int, helper.var_idx("Stage", i)))
code_body = self.gen_using_and_misc(self.b2b_num)
code_body += self.gen_protected()
code_body += self.gen_public_member()
class_code = gen_ir.gen_template_class("B2bMmaBase", template_arg, code_body)
code = self.gen_include_header() + gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("threadblock", class_code)))
return code
class gen_threadblock:
def __init__(self, template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root):
self.gen_class_name = gen_class_name
self.template_param = template_param
self.b2b_num = b2b_num
self.file_dir = output_dir + "/threadblock/"
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
self.gen_b2b_mma_base = gen_b2b_mma_base(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
self.gen_b2b_mma_pipelined = gen_b2b_mme_pipelined(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
self.gen_default_b2b_mma = gen_default_b2b_mma(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
def gen_code(self, first_use_1stage):
base_code = self.gen_b2b_mma_base.gen_code()
print("[INFO]: Gen kernel code [b2b_mma_base.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "b2b_mma_base.h", "w+") as f:
f.write(base_code)
pipeline_code = self.gen_b2b_mma_pipelined.gen_code(first_use_1stage = first_use_1stage)
print("[INFO]: Gen kernel code [b2b_mma_pipelined.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "b2b_mma_pipelined.h", "w+") as f:
f.write(pipeline_code)
default_code = self.gen_default_b2b_mma.gen_code()
print("[INFO]: Gen kernel code [default_b2b_mma.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "default_b2b_mma.h", "w+") as f:
f.write(default_code)
| examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_threadblock.py/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_threadblock.py",
"repo_id": "examples",
"token_count": 24346
} | 8 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Dual epilogue operator: computes D0 = output_op0(accum0, C0), D1 = output_op1(accum1, C1), and D2 = output_op2(D0, D1)
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
///< Output operator
typename OutputOp0_,
typename OutputOp1_,
typename OutputOp2_,
typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
bool StoreD0 = true,
bool StoreD1 = true,
int FragmentsPerPartition = 1, ///< Used to coarsen the epilogue granularity
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large
(!IsEpilogueFunctorHeavy<OutputOp0_>::value)
>
class DualEpilogue {
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
static bool constexpr kStoreD0 = StoreD0;
static bool constexpr kStoreD1 = StoreD1;
using OutputTileIterator = OutputTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp0 = OutputOp0_;
using OutputOp1 = OutputOp1_;
using OutputOp2 = OutputOp2_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
struct SharedStorage {
using Element = typename WarpTileIterator::Element;
/// Tensor reference to shared memory allocation
using TensorRef = typename WarpTileIterator::TensorRef;
/// Logical shape of the shared memory tile written to by all warps.
using Shape = typename Base::Shape;
/// Shape of the shared memory allocation for the epilogue
using StorageShape = typename Base::SharedStorage::StorageShape;
//
// Data members
//
AlignedBuffer<Element, StorageShape::kCount> storage[2];
//
// Methods
//
/// Returns a tensor reference to the shared memory buffer
CUTLASS_DEVICE
TensorRef reference(int i) {
return TensorRef(
storage[i].data(),
Layout::packed({StorageShape::kRow, StorageShape::kColumn}));
}
};
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
static int constexpr kSmemPointerOffset = SharedStorage::StorageShape::kCount / kSmemTiles;
public:
static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
"Divisibility");
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator0_;
SharedLoadIterator shared_load_iterator1_;
/// Stores a warp's fragment of accumulators to SMEM
WarpTileIterator warp_tile_iterator0_;
WarpTileIterator warp_tile_iterator1_;
public:
/// Constructor
CUTLASS_DEVICE
DualEpilogue(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< ID of thread within warp
):
shared_load_iterator0_(shared_storage.reference(0), thread_idx),
shared_load_iterator1_(shared_storage.reference(1), thread_idx),
warp_tile_iterator0_(shared_storage.reference(0), lane_idx),
warp_tile_iterator1_(shared_storage.reference(1), lane_idx)
{
int warp_k = warp_idx / (WarpCount::kM * WarpCount::kN);
int warp_mn = warp_idx % (WarpCount::kM * WarpCount::kN);
int warp_m = warp_mn % WarpCount::kM;
int warp_n = warp_mn / WarpCount::kM;
MatrixCoord warp_offset{warp_k * WarpCount::kM + warp_m, warp_n};
warp_tile_iterator0_.add_tile_offset(warp_offset);
warp_tile_iterator1_.add_tile_offset(warp_offset);
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp0 const &output_op0,
OutputOp1 const &output_op1,
OutputOp2 const &output_op2,
OutputTileIterator dest0,
OutputTileIterator dest1,
OutputTileIterator dest2,
AccumulatorTile const &accumulator0,
AccumulatorTile const &accumulator1,
OutputTileIterator source_iterator[2],
bool writeToD2 // true if it's the final split-k
) {
// TODO: Implement when no source is needed
typename OutputTileIterator::Fragment source_fragment[2];
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
source_fragment[i].clear();
}
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator[2] = {accumulator0, accumulator1};
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
source_iterator[i].load(source_fragment[i]);
++source_iterator[i];
}
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator[0], this->warp_tile_iterator0_);
acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
iter, accum_fragment_iterator[1], this->warp_tile_iterator1_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment0[kPartitionsK];
typename SharedLoadIterator::Fragment aligned_accum_fragment1[kPartitionsK];
shared_load_iterator0_.load(aligned_accum_fragment0[0]);
shared_load_iterator1_.load(aligned_accum_fragment1[0]);
// If the number of k-slices is > 1, perform a reduction amongst the k-slices
if (kPartitionsK > 1) {
plus <typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for ( int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator0_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator1_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator0_.load(aligned_accum_fragment0[i]);
shared_load_iterator1_.load(aligned_accum_fragment1[i]);
aligned_accum_fragment0[0] = add_fragments(aligned_accum_fragment0[0], aligned_accum_fragment0[i]);
aligned_accum_fragment1[0] = add_fragments(aligned_accum_fragment1[0], aligned_accum_fragment1[i]);
}
shared_load_iterator0_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
shared_load_iterator1_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Compute the output result
//
typename OutputTileIterator::Fragment output_fragment[3];
apply_output_operator_(output_fragment,
output_op0, output_op1, output_op2,
aligned_accum_fragment0[0], aligned_accum_fragment1[0],
source_fragment);
//
// Store the final result
//
if (kStoreD0) {
dest0.store(output_fragment[0]);
++dest0;
}
if (kStoreD1) {
dest1.store(output_fragment[1]);
++dest1;
}
if (writeToD2) {
dest2.store(output_fragment[2]);
++dest2;
}
}
}
private:
static_assert(kPartitionsK == 1 || Base::kFragmentsPerIteration == 1, "One of these must be exactly 1.");
template<class Seq>
struct acc2smem_source_needed;
template <size_t... Seq>
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
template<int Advance>
CUTLASS_DEVICE
static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator &warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(size_t pos,
AccumulatorFragmentIterator const &iterator_begin,
WarpTileIterator &warp_tile_iterator) {
int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
}
};
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
typename OutputTileIterator::Fragment (&output_fragment)[3],
OutputOp0 const &output_op0,
OutputOp1 const &output_op1,
OutputOp2 const &output_op2,
typename SharedLoadIterator::Fragment const& aligned_accum_fragment0,
typename SharedLoadIterator::Fragment const& aligned_accum_fragment1,
typename OutputTileIterator::Fragment const (&source_fragment)[2]) {
OutputAccessType* output_frag_ptr[3] = {
reinterpret_cast<OutputAccessType *>(&output_fragment[0]),
reinterpret_cast<OutputAccessType *>(&output_fragment[1]),
reinterpret_cast<OutputAccessType *>(&output_fragment[2])
};
AccumulatorAccessType const *compute_frag_ptr[2] = {
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment0),
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment1)
};
OutputAccessType const *source_frag_ptr[2] = {
reinterpret_cast<OutputAccessType const *>(&source_fragment[0]),
reinterpret_cast<OutputAccessType const *>(&source_fragment[1])
};
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operators
output_frag_ptr[0][i] = output_op0(compute_frag_ptr[0][i], source_frag_ptr[0][i]);
output_frag_ptr[1][i] = output_op1(compute_frag_ptr[1][i], source_frag_ptr[1][i]);
output_frag_ptr[2][i] = output_op2(output_frag_ptr[0][i], output_frag_ptr[1][i]);
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/45_dual_gemm/threadblock/dual_epilogue.h/0 | {
"file_path": "examples/45_dual_gemm/threadblock/dual_epilogue.h",
"repo_id": "examples",
"token_count": 5594
} | 9 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cute/tensor.hpp"
#include "cutlass/arch/arch.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/collective_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
namespace example {
//
// GETT entry point
//
template <
class ProblemShapeMNKL,
class ElementA,
class StrideA,
class ElementB,
class StrideB,
class ElementAccumulator,
class ElementC,
class StrideC,
class ElementD,
class StrideD,
class ElementEpilogue>
cutlass::Status
gett_kernel(
ProblemShapeMNKL problem_shape_mnkl,
ElementA const* ptr_A, StrideA stride_a_mkl,
ElementB const* ptr_B, StrideB stride_b_nkl,
ElementAccumulator _,
ElementC const* ptr_C, StrideC stride_c_mnl,
ElementD * ptr_D, StrideD stride_d_mnl,
ElementEpilogue alpha, ElementEpilogue beta,
cudaStream_t stream = 0) {
using namespace cute;
// TileShape -- GETT configuration
// Specify the number of elements to take from each mode
// BLK_M = (M0,M1,...)   BLK_N = (N0,N1,...)   BLK_K = (K0,K1,...)
// Take 128 from m0, 128 from n0, 64 from k0
using TileShape = Shape<Shape<_128>, Shape<_128>, Shape<_64>>;
/* Other examples:
* Take 32 elements from m0 and 4 elements from m1
* Take 64 elements from n0 and 2 elements from n1
* Take 8 elements from k0 and 8 elements from k1
**/
// using TileShape = Shape<Shape<_32,_4>, Shape<_64,_2>, Shape<_8,_8>>;
using EpilogueThreadOp = cutlass::epilogue::thread::LinearCombination<
ElementD, 1, ElementAccumulator, ElementEpilogue, cutlass::epilogue::thread::ScaleType::Default,
cutlass::FloatRoundStyle::round_to_nearest, ElementC>;
// No changes are required to the default epilogue
using CollectiveEpilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter<
cutlass::epilogue::collective::DefaultEpilogue<
StrideC,
StrideD,
EpilogueThreadOp,
cutlass::gemm::EpilogueDefault>>;
// CollectiveMma for GETTs can be built using the CollectiveBuilders
using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, StrideA, 128 / cutlass::sizeof_bits<ElementA>::value,
ElementB, StrideB, 128 / cutlass::sizeof_bits<ElementB>::value,
ElementAccumulator,
TileShape, Shape<_1,_2,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<
static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
// The GETT kernel is a composition of a collective mainloop and epilogue, just like any 3.x GEMM
using GettKernel = cutlass::gemm::kernel::GemmUniversal<
ProblemShapeMNKL,
CollectiveMainloop,
CollectiveEpilogue>;
using GettOperator = cutlass::gemm::device::GemmUniversalAdapter<GettKernel>;
typename GettOperator::Arguments args {
cutlass::gemm::GemmUniversalMode::kBatched,
problem_shape_mnkl,
{ ptr_A, stride_a_mkl, ptr_B, stride_b_nkl },
{ {alpha, beta}, ptr_C, stride_c_mnl, ptr_D, stride_d_mnl }
};
#if CUTLASS_DEBUG_TRACE_LEVEL > 0
print("Problem shape:");
print("\tM: "); print(cute::get<0>(problem_shape_mnkl)); print("\n");
print("\tN: "); print(cute::get<1>(problem_shape_mnkl)); print("\n");
print("\tK: "); print(cute::get<2>(problem_shape_mnkl)); print("\n");
print("\tL: "); print(cute::get<3>(problem_shape_mnkl)); print("\n");
print("TileSape:"); print(TileShape{}); print("\n");
#endif
GettOperator op;
return op(args, stream);
}
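// A minimal usage sketch (an illustrative assumption, not part of the original example): it shows
// how a caller might invoke gett_kernel when every GETT mode is a single rank-1 mode, i.e. a plain
// batched GEMM-like problem. The element types, extents, and device pointers are hypothetical
// placeholders supplied by the caller; the alignment and extent requirements of the SM90
// collective builder still apply at instantiation time.
template <class ElementA, class ElementB, class ElementC, class ElementD>
cutlass::Status
gett_kernel_usage_sketch(
    ElementA const* d_A, ElementB const* d_B, ElementC const* d_C, ElementD* d_D,
    int m, int n, int k, int l, cudaStream_t stream = 0) {
  using namespace cute;
  // (M, N, K, L) with one mode per dimension
  auto problem_shape = make_shape(make_shape(m), make_shape(n), make_shape(k), l);
  // K-major A/B (unit stride along K) and M-major C/D; the batch stride is the matrix volume
  auto stride_a = make_stride(make_stride(k), make_stride(_1{}), m * k);    // (M, K, L)
  auto stride_b = make_stride(make_stride(k), make_stride(_1{}), n * k);    // (N, K, L)
  auto stride_c = make_stride(make_stride(_1{}), make_stride(m), m * n);    // (M, N, L)
  return gett_kernel(problem_shape,
                     d_A, stride_a,
                     d_B, stride_b,
                     float{},           // ElementAccumulator tag
                     d_C, stride_c,
                     d_D, stride_c,     // D reuses C's stride in this sketch
                     1.0f, 0.0f,        // alpha, beta (ElementEpilogue = float)
                     stream);
}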
} // namespace example
| examples/51_hopper_gett/gett_kernel.cuh/0 | {
"file_path": "examples/51_hopper_gett/gett_kernel.cuh",
"repo_id": "examples",
"token_count": 1911
} | 10 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cute/tensor.hpp"
#include <cuda.h>
#include "helper.h"
template <class QuantizedElement,
class DequantizedElement,
class OperandLayout,
class ElementScale,
class ElementZero,
class ScaleBroadCastLayout,
class ThrLayout>
__global__ void dequantize_weight_kernel(DequantizedElement* dq_buffer,
QuantizedElement const* q_buffer,
OperandLayout const operand_layout,
ElementScale const* scale_buffer,
ElementZero const* zero_buffer,
ScaleBroadCastLayout const broadcasted_scale_layout,
ThrLayout thr_layout) {
using namespace cute;
// Represent the full tensors to gmem elements.
// These are expected to have shape [MN, K, L]
Tensor gmem_op_dq = make_tensor(make_gmem_ptr(dq_buffer), operand_layout);
auto init_quantized_iterator = [&]() {
if constexpr (cute::sizeof_bits_v<QuantizedElement> >= 8) {
return make_gmem_ptr(q_buffer);
} else {
return subbyte_iterator<const QuantizedElement>(q_buffer);
}
};
Tensor gmem_op_q = make_tensor(init_quantized_iterator(), operand_layout);
// The scales have logical shape [MN, G, L] and are broadcast along K via a zero stride.
// It is expected that K % G == 0.
Tensor gmem_scale_broadcasted = make_tensor(make_gmem_ptr(scale_buffer), broadcasted_scale_layout);
Tensor gmem_zero_broadcasted = make_tensor(make_gmem_ptr(zero_buffer), broadcasted_scale_layout);
// Assign 1 thread per element in the thread block
auto blk_shape = make_shape(size<0>(thr_layout), _1{}, _1{}); //
auto blk_coord = make_coord(_, blockIdx.x, blockIdx.y); // (MN, K, L)
// Tile across the block
auto gOp_dq = local_tile(gmem_op_dq, blk_shape, blk_coord);
auto gScale = local_tile(gmem_scale_broadcasted, blk_shape, blk_coord);
auto gZero = local_tile(gmem_zero_broadcasted, blk_shape, blk_coord);
auto gOp_q = local_tile(gmem_op_q, blk_shape, blk_coord);
auto tOpDq_gOpDq = local_partition(gOp_dq, thr_layout, threadIdx.x);
auto tScale_gScale = local_partition(gScale, thr_layout, threadIdx.x);
auto tZero_gZero = local_partition(gZero, thr_layout, threadIdx.x);
auto tOpQ_gOpQ = local_partition(gOp_q, thr_layout, threadIdx.x);
// Make a fragment of registers to hold gmem loads
Tensor rmem_op_q = make_fragment_like(tOpQ_gOpQ(_, _, _, 0));
Tensor rmem_scale = make_fragment_like(tScale_gScale(_, _, _, 0));
Tensor rmem_zero = make_fragment_like(tZero_gZero(_, _, _, 0));
Tensor rmem_op_dq = make_fragment_like(tOpDq_gOpDq(_, _, _, 0));
Tensor rmem_op_scaled = make_fragment_like<ElementScale>(rmem_op_dq);
Tensor rmem_zero_buf = make_fragment_like<ElementScale>(rmem_zero);
Tensor pred_id = make_identity_tensor(shape(operand_layout));
auto pred_blk_tile = local_tile(pred_id, blk_shape, blk_coord);
auto pred_thr_partition = local_partition(pred_blk_tile, thr_layout, threadIdx.x);
const auto num_iters = size<3>(tOpDq_gOpDq);
for (int ii = 0; ii < num_iters; ++ii) {
const auto thread_offset = get<0>(pred_thr_partition(0, 0, 0, ii));
if (thread_offset < size<0>(operand_layout)) {
copy(tOpQ_gOpQ(_, _, _, ii), rmem_op_q);
copy(tScale_gScale(_, _, _, ii), rmem_scale);
copy(tZero_gZero(_, _, _, ii), rmem_zero);
transform(rmem_op_q, rmem_op_scaled, [] (const QuantizedElement& elt) { return ElementScale(elt); } );
transform(rmem_zero, rmem_zero_buf, [] (const ElementZero& elt) { return ElementScale(elt); } );
transform(rmem_op_scaled, rmem_scale, rmem_op_scaled, multiplies{});
transform(rmem_op_scaled, rmem_zero_buf, rmem_op_scaled, plus{});
transform(rmem_op_scaled, rmem_op_dq, [] (const ElementScale& elt) { return DequantizedElement(elt); } );
copy(rmem_op_dq, tOpDq_gOpDq(_, _, _, ii));
}
}
}
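// Illustrative usage sketch for the host-side launcher below (an assumption, not part of the
// original example; N, K, L, the group size of 128, and the device pointers are hypothetical):
//
//   auto operand_layout = cute::make_layout(cute::make_shape(N, K, L));         // column-major [MN, K, L]
//   auto scale_layout   = cute::make_layout(cute::make_shape(N, K / 128, L));   // [MN, Scale_K, L]
//   dequantize_weight(d_dequantized, d_quantized, operand_layout,
//                     d_scales, d_zeros, scale_layout, /* group_size = */ 128);
//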
template <class QuantizedElement,
class DequantizedElement,
class OperandLayout,
class ElementScale,
class ElementZero,
class ScaleLayout>
void dequantize_weight(DequantizedElement* dq_buffer,
QuantizedElement const* q_buffer,
OperandLayout const operand_layout,
ElementScale const* scale_buffer,
ElementZero const* zero_buffer,
ScaleLayout const scale_layout,
int const group_size) {
using namespace cute;
constexpr int tpb = 128;
auto thr_layout = make_layout(make_shape(Int<tpb>{}));
const auto num_rows = get<0>(shape(operand_layout));
const auto gemm_k = get<1>(shape(operand_layout)); // [MN, K, L]
const auto batches = get<2>(shape(operand_layout)); // [MN, K, L]
const auto scale_k = get<1>(shape(scale_layout)); // [MN, Scale_K, L]
if (num_rows != size<0>(scale_layout)) {
std::cerr << "Invalid first dimension for scales. Must match first dim for weights."
<< " But got shapes " << shape(operand_layout) << " " << shape(scale_layout)
<< std::endl;
exit(-1);
}
const auto scale_stride0 = get<0>(stride(scale_layout));
const auto scale_stride1 = get<1>(stride(scale_layout));
const auto scale_stride2 = get<2>(stride(scale_layout));
auto scale_shape_bcast = make_shape(num_rows, make_shape(group_size, scale_k), batches);
auto scale_stride_bcast = make_stride(scale_stride0, make_stride(0, scale_stride1), scale_stride2);
auto scale_layout_bcast = make_layout(scale_shape_bcast, scale_stride_bcast);
const auto blocks_x = gemm_k;
const auto blocks_y = batches;
dim3 blocks(blocks_x, blocks_y, 1);
dequantize_weight_kernel<<<blocks, tpb>>>(dq_buffer, q_buffer, operand_layout, scale_buffer, zero_buffer, scale_layout_bcast, thr_layout);
CUDA_CHECK(cudaDeviceSynchronize());
}
| examples/55_hopper_mixed_dtype_gemm/unfused_weight_dequantize.hpp/0 | {
"file_path": "examples/55_hopper_mixed_dtype_gemm/unfused_weight_dequantize.hpp",
"repo_id": "examples",
"token_count": 3052
} | 11 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/atom/copy_atom.hpp>
#include <cute/algorithm/copy.hpp>
#include <cute/tensor_impl.hpp>
#include <cute/tensor_predicate.hpp>
namespace cute
{
template <uint32_t NumThreads,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE void
naive_cooperative_copy(uint32_t const& tid,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
auto N = size(src);
if (tid < N) {
uint32_t upper_bound = (N / NumThreads) * NumThreads;
CUTE_UNROLL
for (uint32_t i = 0; i < upper_bound; i += NumThreads) { // All in-bounds
dst[tid + i] = src[tid + i];
}
if (N % NumThreads != 0) { // Likely static condition
uint32_t final_idx = tid + upper_bound;
if (final_idx < N) { // Final in-bounds
dst[final_idx] = src[final_idx];
}
}
}
}
// Accept mutable temporaries
template <uint32_t NumThreads,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE void
naive_cooperative_copy(uint32_t const& tid,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return naive_cooperative_copy(tid, src, dst);
}
// A heuristic to determine a "good" permutation of two tensors for later vectorization and thr-assignment
template <class AEngine, class ALayout,
class BEngine, class BLayout>
CUTE_HOST_DEVICE constexpr
auto
heuristic_permutation(Tensor<AEngine, ALayout> const& a,
Tensor<BEngine, BLayout> const& b)
{
constexpr bool swizzleA = get_swizzle_t<AEngine>::num_bits != 0 or
get_swizzle_t<ALayout>::num_bits != 0;
constexpr bool swizzleB = get_swizzle_t<BEngine>::num_bits != 0 or
get_swizzle_t<BLayout>::num_bits != 0;
auto a_inv = right_inverse(get_nonswizzle_portion(a.layout()));
auto b_inv = right_inverse(get_nonswizzle_portion(b.layout()));
constexpr uint8_t scoreA = (uint8_t(swizzleA) << 2) |
(uint8_t(is_smem<AEngine>::value) << 1) |
(uint8_t(size(a_inv) > size(b_inv)) << 0);
constexpr uint8_t scoreB = (uint8_t(swizzleB) << 2) |
(uint8_t(is_smem<BEngine>::value) << 1) |
(uint8_t(size(b_inv) > size(a_inv)) << 0);
if constexpr (scoreA >= scoreB) {
return a_inv;
} else {
return b_inv;
}
}
// cooperative_copy<NumThreads, MaxVecBits>(thr_idx, src, dst)
// Use NumThreads to copy Tensor src to Tensor dst with element-wise vectorization up to MaxVecBits.
// @pre 0 <= @a tid < NumThreads
// @pre Tensors @a src and @a dst are aligned up to MaxVecBits.
// That is, pointers and dynamic strides are assumed to be aligned up to MaxVecBits.
//
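// Example (an illustrative sketch, not part of the original header; the tile extents, element
// type, and thread count below are assumptions):
//
//   extern __shared__ cute::half_t smem[];
//   Tensor gA = make_tensor(make_gmem_ptr(gptr), make_shape(Int<128>{}, Int<64>{}), GenRowMajor{});
//   Tensor sA = make_tensor(make_smem_ptr(smem), make_shape(Int<128>{}, Int<64>{}), GenRowMajor{});
//   // 256 threads cooperatively copy the gmem tile into smem, vectorizing up to 128 bits
//   cooperative_copy<256, 128>(threadIdx.x, gA, sA);
//   __syncthreads();
//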
template <uint32_t NumThreads, uint32_t MaxVecBits,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
cooperative_copy(uint32_t const& tid,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
// Assumes the shapes are static, can generalize/fallback
CUTE_STATIC_ASSERT_V(is_static<decltype(shape(src))>{} && is_static<decltype(shape(dst))>{});
CUTE_STATIC_ASSERT_V(size(src) == size(dst));
// Assumes the types are the same, can generalize/fallback
static_assert(cute::is_same<typename SrcEngine::value_type, typename DstEngine::value_type>::value);
static_assert(MaxVecBits == sizeof_bits_v<typename SrcEngine::value_type> ||
MaxVecBits == 8 || MaxVecBits == 16 || MaxVecBits == 32 || MaxVecBits == 64 || MaxVecBits == 128,
"Expected MaxVecBits to be value size or 8 or 16 or 32 or 64 or 128 for alignment and performance.");
// Check that the tensors are likely shared across threads: either gmem or smem
static_assert((is_gmem<SrcEngine>::value || is_smem<SrcEngine>::value),
"cooperative_copy expects shared gmem or smem source tensor.");
static_assert((is_gmem<DstEngine>::value || is_smem<DstEngine>::value),
"cooperative_copy expects shared gmem or smem destination tensor.");
// Precondition on tid in DEBUG
assert(tid < NumThreads);
// Precondition on pointer alignment in DEBUG
assert(is_byte_aligned<ceil_div(MaxVecBits,8u)>(raw_pointer_cast(src.data())));
assert(is_byte_aligned<ceil_div(MaxVecBits,8u)>(raw_pointer_cast(dst.data())));
#if 0
if (thread0()) {
print(" "); print("cooperative_copy\n");
print(" "); print("NumThreads: "); print(NumThreads); print("\n");
print(" "); print("MaxVecBits: "); print(MaxVecBits); print("\n");
print(" "); print("src: "); print(src); print("\n");
print(" "); print("dst: "); print(dst); print("\n");
}
#ifdef __CUDA_ARCH__
__syncthreads();
#endif
#endif
// The common layout of the two tensors that can be vectorized over elements and threads
// vidx -> coord
auto common_layout = heuristic_permutation(src, dst);
// Apply
// (V, rest)
Tensor src_a = coalesce(logical_divide(src, common_layout), Shape<_1,_1>{});
Tensor dst_a = coalesce(logical_divide(dst, common_layout), Shape<_1,_1>{});
//
// Determine vectorization of elems and thrs based on src/dst size and number of threads
// NOTE: This heuristic promotes parallelization over vectorization
//
// The number of elements and number of bits
constexpr int elem_bits = sizeof_bits_v<typename SrcEngine::value_type>;
constexpr int total_elem = size(SrcLayout{});
// The number of elements that can be vectorized in values
constexpr int common_elem = decltype(max_common_vector(src_a, dst_a))::value;
#if 0
if (thread0()) {
print(" "); print("common_layout: "); print(common_layout); print("\n");
print(" "); print("src_a: "); print(src_a); print("\n");
print(" "); print("dst_a: "); print(dst_a); print("\n");
}
#ifdef __CUDA_ARCH__
__syncthreads();
#endif
#endif
//
if constexpr (total_elem % NumThreads != 0) {
// Not attempting to find a partitioning pattern, fallback to dynamically indexed slowpath
if constexpr (common_elem > 1 && MaxVecBits > elem_bits) {
// If the vectorization is non-trivial and divides the maximum vectorizations, then vectorize
constexpr auto max_align_src = elem_bits * decltype(max_alignment(src_a.layout()))::value;
constexpr auto max_align_dst = elem_bits * decltype(max_alignment(dst_a.layout()))::value;
constexpr auto vec_bits = gcd(max_align_src, max_align_dst, MaxVecBits);
using VecType = uint_bit_t<vec_bits>;
static_assert(vec_bits % elem_bits == 0, "Expected divisibility");
static_assert((vec_bits >= 8), "No support for subbyte copying");
Tensor src_v = recast<VecType const>(src_a);
Tensor dst_v = recast<VecType >(dst_a);
#if 0
if (thread0()) {
print(" "); print("cooperative_copy -- naive\n");
print(" "); print("src_v: "); print(src_v); print("\n");
print(" "); print("dst_v: "); print(dst_v); print("\n");
}
#ifdef __CUDA_ARCH__
__syncthreads();
#endif
#endif
naive_cooperative_copy<NumThreads>(tid, src_v, dst_v);
} else {
naive_cooperative_copy<NumThreads>(tid, src_a, dst_a);
}
} else {
// If the tensors can be equally partitioned by the threads,
// compute vectorization widths in elements and threads.
// If there are too many threads to allow a full vectorized copy, truncate the vectorization
constexpr int total_bits = total_elem * elem_bits;
constexpr int max_bits_per_thr = total_bits / NumThreads;
// At least elem_bits, at most common_bits
constexpr int common_bits = common_elem * elem_bits;
constexpr int vec_bits = cute::max(elem_bits, cute::gcd(common_bits, int(MaxVecBits), max_bits_per_thr));
// Should account for vec_bits < 8 and/or vec_elem <= 1
// And also account for subbyte types, which could cause race conditions
// Want to ENFORCE sufficient vectorization in those cases
static_assert(vec_bits % elem_bits == 0, "Expected divisibility");
static_assert(vec_bits >= 8, "No support for subbyte copying");
using VecType = uint_bit_t<vec_bits>;
constexpr int vec_elem = vec_bits / elem_bits;
constexpr int vec_thrs = cute::min(int(NumThreads), total_elem / vec_elem);
//
// Determine the partitioning patterns for the vec_elems and vec_thrs
//
// Distribute the rest of the V*T to some consistent portion outside of the common_layout, if needed
auto common_domain_src = domain_distribute(shape(src_a), Int<vec_elem*vec_thrs>{});
auto common_domain_dst = domain_distribute(shape(dst_a), Int<vec_elem*vec_thrs>{});
// Require an exact match for now; a fallback path could be added here instead
CUTE_STATIC_ASSERT_V(size(common_domain_src) == Int<vec_elem*vec_thrs>{});
CUTE_STATIC_ASSERT_V(compatible(common_domain_src, common_domain_dst) ||
compatible(common_domain_dst, common_domain_src));
// Use the "more specific" domain for the extra elements of V*T
auto common_domain = conditional_return(compatible(common_domain_src, common_domain_dst),
common_domain_dst, common_domain_src);
// Construct the tiler
auto tiler_vt = common_domain.with_shape(Int<vec_elem>{}, Int<vec_thrs>{});
// Apply and slice
Tensor src_v = logical_divide(src_a, tiler_vt)(make_coord(_,tid),_);
Tensor dst_v = logical_divide(dst_a, tiler_vt)(make_coord(_,tid),_);
#if 0
if (thread0()) {
print(" "); print("cooperative_copy -- vec\n");
print(" "); print("Used vector: "); print(vec_elem); print("\n");
print(" "); print("Used threads: "); print(vec_thrs); print("\n");
print(" "); print("tiler_vt: "); print(tiler_vt); print("\n");
print(" "); print("src_v: "); print(src_v); print("\n");
print(" "); print("dst_v: "); print(dst_v); print("\n");
print(" "); print("recast<VecType const>(src_v): "); print(recast<VecType const>(src_v)); print("\n");
print(" "); print("recast<VecType >(dst_v): "); print(recast<VecType >(dst_v)); print("\n");
}
#ifdef __CUDA_ARCH__
__syncthreads();
#endif
#endif
// If we're using all threads (static) or the tid is in-range (dynamic)
if (vec_thrs == NumThreads or tid < vec_thrs) {
return copy_if(TrivialPredTensor{}, recast<VecType const>(src_v), recast<VecType>(dst_v));
}
}
}
// Default max-vectorization size to value_type size
template <uint32_t NumThreads,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
cooperative_copy(uint32_t const& tid,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> & dst)
{
constexpr uint32_t MaxVecBits = sizeof_bits_v<typename SrcEngine::value_type>;
return cooperative_copy<NumThreads, MaxVecBits>(tid, src, dst);
}
//
// Accept mutable temporaries
//
template <uint32_t NumThreads,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
cooperative_copy(uint32_t const& tid,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return cooperative_copy<NumThreads>(tid, src, dst);
}
template <uint32_t NumThreads, uint32_t MaxVecBits,
class SrcEngine, class SrcLayout,
class DstEngine, class DstLayout>
CUTE_HOST_DEVICE
void
cooperative_copy(uint32_t const& tid,
Tensor<SrcEngine, SrcLayout> const& src,
Tensor<DstEngine, DstLayout> && dst)
{
return cooperative_copy<NumThreads, MaxVecBits>(tid, src, dst);
}
} // end namespace cute
| include/cute/algorithm/cooperative_copy.hpp/0 | {
"file_path": "include/cute/algorithm/cooperative_copy.hpp",
"repo_id": "include",
"token_count": 5818
} | 12 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <cuda.h>
#include <cinttypes>
#endif
#include <cute/config.hpp>
#include <cute/arch/copy.hpp>
#include <cute/arch/copy_sm90.hpp>
#include <cute/container/alignment.hpp>
#include <cute/container/bit_field.hpp>
#include <cute/container/array.hpp>
#include <cute/numeric/numeric_types.hpp>
namespace cute
{
//////////////////////////////////////////////////////////////////////////////////////////////////////
/// Barriers are 64 bits of user-managed information used in broadly two types of synchronization patterns
/// 1) arrive/wait on threads (usage: cp.async and warp-specialized kernels)
/// 2) transaction-based (usage: TMA, where a CTA issues one transaction)
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Initialize barrier present in shared memory
CUTE_HOST_DEVICE
void
initialize_barrier(uint64_t& smem_barrier,                 // 64-bit user-managed barrier in smem
int thread_count = 1) // Thread count expected to arrive/wait on this barrier
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_barrier);
asm volatile ("mbarrier.init.shared::cta.b64 [%0], %1;\n"
:: "r"(smem_int_ptr),
"r"(thread_count));
#endif
}
// Set the number of bytes transferred per transaction and perform an arrive operation as well
CUTE_HOST_DEVICE
void
set_barrier_transaction_bytes(uint64_t& smem_barrier,      // 64-bit user-managed barrier in smem
                              uint32_t bytes)              // Number of bytes transferred per TMA transaction
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_barrier);
asm volatile ("mbarrier.arrive.expect_tx.shared::cta.b64 _, [%0], %1;\n"
:: "r"(smem_int_ptr),
"r"(bytes));
#endif
}
// Barrier wait
CUTE_HOST_DEVICE
void
wait_barrier(uint64_t& smem_barrier,                       // 64-bit user-managed barrier in smem
             int phase_bit)                                // Current phase bit the barrier is waiting to flip
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_barrier);
asm volatile(
"{\n"
".reg .pred P1;\n"
"LAB_WAIT:\n"
"mbarrier.try_wait.parity.shared::cta.b64 P1, [%0], %1;\n"
"@P1 bra DONE;\n"
"bra LAB_WAIT;\n"
"DONE:\n"
"}\n"
:: "r"(smem_int_ptr),
"r"(phase_bit));
#endif
}
// Barrier arrive
CUTE_HOST_DEVICE
void
arrive_barrier(uint64_t& smem_barrier)                     // 64-bit user-managed barrier in smem
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_barrier);
asm volatile(
"{\n"
".reg .b64 state; \n"
"mbarrier.arrive.shared::cta.b64 state, [%0];\n"
"}\n"
:: "r"(smem_int_ptr));
#endif
}
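// Illustrative usage sketch (non-normative) of the arrive/wait pattern above,
// assuming every thread of a CTA participates in a single shared-memory barrier:
//
//   __shared__ uint64_t bar;
//   if (threadIdx.x == 0) { initialize_barrier(bar, /*thread_count=*/blockDim.x); }
//   __syncthreads();              // make the initialized barrier visible to all threads
//   int phase = 0;
//   arrive_barrier(bar);          // each participating thread arrives once
//   wait_barrier(bar, phase);     // spin until the barrier's phase flips
//   phase ^= 1;                   // track parity for the next arrive/wait round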
////////////////////////////////////////////////////////////////////////////////////////////////////
// TMA Descriptor and utilities
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace TMA {
enum class SmemSwizzleBits : uint8_t {
DISABLE = 0,
B32 = 1,
B64 = 2,
B128 = 3,
};
enum class OOBFill : uint8_t {
ZERO = 0,
CONSTANT = 1,
};
CUTE_HOST_DEVICE char const* to_string(OOBFill const& t) {
switch (t) {
case OOBFill::ZERO: return "ZERO";
case OOBFill::CONSTANT: return "CONSTANT";
}
return nullptr;
}
enum class L2Promotion : uint8_t {
DISABLE = 0,
B64 = 1,
B128 = 2,
B256 = 3,
};
CUTE_HOST_DEVICE char const* to_string(L2Promotion const& t) {
switch (t) {
case L2Promotion::DISABLE: return "DISABLE";
case L2Promotion::B64: return "B64";
case L2Promotion::B128: return "B128";
case L2Promotion::B256: return "B256";
}
return nullptr;
}
// Aux parameters that are independent of the problem size
struct DescriptorAuxParams {
OOBFill oobfill_ = OOBFill::ZERO;
L2Promotion l2promo_ = L2Promotion::DISABLE;
};
enum class CacheHintSm90 : uint64_t {
EVICT_NORMAL = 0x1000000000000000,
EVICT_FIRST = 0x12F0000000000000,
EVICT_LAST = 0x14F0000000000000,
};
#if (__CUDACC_VER_MAJOR__ >= 12)
#if !defined(__CUDACC_RTC__)
/// @return The TMA descriptor datatype enum corresponding to T.
template <class T>
inline CUtensorMapDataType
to_CUtensorMapDataType() {
if constexpr (is_same_v<T, int8_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT8; } else
if constexpr (is_same_v<T, uint8_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT8; } else
if constexpr (is_same_v<T, float_e4m3_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT8; } else
if constexpr (is_same_v<T, float_e5m2_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT8; } else
if constexpr (is_same_v<T, uint16_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT16; } else
if constexpr (is_same_v<T, uint32_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT32; } else
if constexpr (is_same_v<T, uint64_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT64; } else
if constexpr (is_same_v<T, int32_t>) { return CU_TENSOR_MAP_DATA_TYPE_INT32; } else
if constexpr (is_same_v<T, int64_t>) { return CU_TENSOR_MAP_DATA_TYPE_INT64; } else
if constexpr (is_same_v<T, half_t>) { return CU_TENSOR_MAP_DATA_TYPE_FLOAT16; } else
if constexpr (is_same_v<T, float>) { return CU_TENSOR_MAP_DATA_TYPE_FLOAT32; } else
if constexpr (is_same_v<T, double>) { return CU_TENSOR_MAP_DATA_TYPE_FLOAT64; } else
if constexpr (is_same_v<T, bfloat16_t>) { return CU_TENSOR_MAP_DATA_TYPE_BFLOAT16; } else
if constexpr (is_same_v<T, tfloat32_t>) { return CU_TENSOR_MAP_DATA_TYPE_TFLOAT32; } else
{ static_assert(sizeof(T) < 0, "Unknown TMA Format!"); }
}
inline CUtensorMapSwizzle
to_CUtensorMapSwizzle(SmemSwizzleBits const& t) {
switch (t) {
default: assert(false && "Unknown SmemSwizzleBits!");
case SmemSwizzleBits::DISABLE: return CU_TENSOR_MAP_SWIZZLE_NONE;
case SmemSwizzleBits::B32: return CU_TENSOR_MAP_SWIZZLE_32B;
case SmemSwizzleBits::B64: return CU_TENSOR_MAP_SWIZZLE_64B;
case SmemSwizzleBits::B128: return CU_TENSOR_MAP_SWIZZLE_128B;
}
}
inline CUtensorMapFloatOOBfill
to_CUtensorMapFloatOOBfill(OOBFill const& t) {
switch(t) {
default: assert(false && "Unknown OOBFill!");
case OOBFill::ZERO: return CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE;
case OOBFill::CONSTANT: return CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA;
}
}
inline CUtensorMapL2promotion
to_CUtensorMapL2promotion(L2Promotion const& t) {
switch(t) {
default: assert(false && "Unknown L2Promotion!");
case L2Promotion::DISABLE: return CU_TENSOR_MAP_L2_PROMOTION_NONE;
case L2Promotion::B64: return CU_TENSOR_MAP_L2_PROMOTION_L2_64B;
case L2Promotion::B128: return CU_TENSOR_MAP_L2_PROMOTION_L2_128B;
case L2Promotion::B256: return CU_TENSOR_MAP_L2_PROMOTION_L2_256B;
}
}
#endif // !defined(__CUDACC_RTC__)
#endif // (__CUDACC_VER_MAJOR__ >= 12)
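// Illustrative example (non-normative): mapping the default aux parameters and a
// CUTLASS element type onto the CUDA driver enums via the converters above
// (host-side, CUDA 12+ only):
//
//   DescriptorAuxParams aux{};                               // ZERO fill, no L2 promotion
//   auto dtype = to_CUtensorMapDataType<cute::half_t>();     // CU_TENSOR_MAP_DATA_TYPE_FLOAT16
//   auto l2    = to_CUtensorMapL2promotion(aux.l2promo_);    // CU_TENSOR_MAP_L2_PROMOTION_NONE
//   auto fill  = to_CUtensorMapFloatOOBfill(aux.oobfill_);   // CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE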
} // end namespace TMA
#if (__CUDACC_VER_MAJOR__ >= 12) && !defined(__CUDACC_RTC__)
using TmaDescriptor = CUtensorMap;
using Im2ColTmaDescriptor = CUtensorMap;
#else
using TmaDescriptor = struct alignas(64) { char bytes[128]; };
using Im2ColTmaDescriptor = struct alignas(64) { char bytes[128]; };
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Initiates a TensorMap Prefetch
////////////////////////////////////////////////////////////////////////////////////////////////////
CUTE_HOST_DEVICE
void
prefetch_tma_descriptor(TmaDescriptor const* desc_ptr)
{
#if defined(CUTE_ARCH_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
// Prefetch TMA Descriptor using generic addressing (i.e. no specific state space: const or param)
asm volatile (
"prefetch.tensormap [%0];"
:
: "l"(gmem_int_desc)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Trying to use TMA Descriptor Prefetch without CUTE_ARCH_TMA_SM90_ENABLED.");
#endif
}
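// Illustrative usage sketch (non-normative): prefetch the descriptor once, early in
// the kernel prologue, before the first copy that consumes it. `tma_desc` stands in
// for a TmaDescriptor living in global or constant memory.
//
//   if (threadIdx.x == 0) {
//     prefetch_tma_descriptor(&tma_desc);
//   }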
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Perform a TensorMap modification (by each field)
////////////////////////////////////////////////////////////////////////////////////////////////////
// Replace tensor pointer directly in GMEM
CUTE_HOST_DEVICE
void
tma_descriptor_replace_addr_in_global_mem(TmaDescriptor const* desc_ptr,
void const* const new_tensor_ptr)
{
#if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
uint64_t const new_desc_addr = reinterpret_cast<uint64_t>(new_tensor_ptr);
asm volatile (
"tensormap.replace.tile.global_address.global.b1024.b64 [%0], %1;"
:: "l"(gmem_int_desc), "l"(new_desc_addr));
#else
CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3");
#endif
}
// Replace tensor pointer in a tensormap that has been brought from GMEM into shared memory
CUTE_HOST_DEVICE
void
tma_descriptor_replace_addr_in_shared_mem(TmaDescriptor& smem_desc,
void const* const new_tensor_ptr)
{
#if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED)
uint32_t smem_int_desc = cast_smem_ptr_to_uint(&smem_desc);
uint64_t const new_desc_addr = reinterpret_cast<uint64_t>(new_tensor_ptr);
uint64_t const smem_int64_desc = 0;
asm volatile (
"cvt.u64.u32 %0, %1;"
:: "l"(smem_int64_desc), "r"(smem_int_desc));
asm volatile (
"tensormap.replace.tile.global_address.shared::cta.b1024.b64 [%0], %1;"
:: "l"(smem_int64_desc), "l"(new_desc_addr));
#else
CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3");
#endif
}
// Replace tensor dims and strides (for GEMMs) in a tensormap that has been brought from GMEM into shared memory
CUTE_HOST_DEVICE
void
tma_descriptor_replace_dims_strides_in_shared_mem(TmaDescriptor & smem_desc,
cute::array<uint32_t, 3> const& prob_shape,
cute::array<uint64_t, 3> const& prob_stride)
{
#if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED)
uint32_t smem_int_desc = cast_smem_ptr_to_uint(&smem_desc);
uint64_t const smem_int64_desc = 0;
asm volatile (
"cvt.u64.u32 %0, %1;"
:: "l"(smem_int64_desc), "r"(smem_int_desc));
asm volatile (
"tensormap.replace.tile.global_dim.shared::cta.b1024.b32 [%0], 0, %1;"
:: "l"(smem_int64_desc), "r"(prob_shape[0]));
asm volatile (
"tensormap.replace.tile.global_dim.shared::cta.b1024.b32 [%0], 1, %1;"
:: "l"(smem_int64_desc), "r"(prob_shape[1]));
asm volatile (
"tensormap.replace.tile.global_dim.shared::cta.b1024.b32 [%0], 2, %1;"
:: "l"(smem_int64_desc), "r"(prob_shape[2]));
  // Strides must be a multiple of 16. Also, the stride for the innermost dimension is implicitly 1
#if ((__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 5)))
// 4 LSBs are not included
asm volatile (
"tensormap.replace.tile.global_stride.shared::cta.b1024.b64 [%0], 0, %1;"
:: "l"(smem_int64_desc), "l"(prob_stride[1]));
asm volatile (
"tensormap.replace.tile.global_stride.shared::cta.b1024.b64 [%0], 1, %1;"
:: "l"(smem_int64_desc), "l"(prob_stride[2]));
#else
asm volatile (
"tensormap.replace.tile.global_stride.shared::cta.b1024.b64 [%0], 0, %1;"
:: "l"(smem_int64_desc), "l"(prob_stride[1] >> 4));
asm volatile (
"tensormap.replace.tile.global_stride.shared::cta.b1024.b64 [%0], 1, %1;"
:: "l"(smem_int64_desc), "l"(prob_stride[2] >> 4));
#endif
#else
CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3");
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Perform a fused copy and fence operation (needed when modifying tensormap in shared memory)
////////////////////////////////////////////////////////////////////////////////////////////////////
CUTE_HOST_DEVICE
void
tma_descriptor_cp_fence_release(TmaDescriptor const* gmem_desc_ptr, TmaDescriptor& smem_desc)
{
#if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(gmem_desc_ptr);
uint32_t smem_int_desc = cast_smem_ptr_to_uint(&smem_desc);
asm volatile (
"tensormap.cp_fenceproxy.global.shared::cta.tensormap::generic.release.gpu.sync.aligned [%0], [%1], 128;"
:: "l"(gmem_int_desc), "r"(smem_int_desc));
#else
CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3");
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Perform a release fence operation (needed when modifying tensormap directly in GMEM)
////////////////////////////////////////////////////////////////////////////////////////////////////
CUTE_HOST_DEVICE
void
tma_descriptor_fence_release()
{
#if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED)
asm volatile ("fence.proxy.tensormap::generic.release.gpu;");
#else
CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3");
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Perform an acquire fence operation
////////////////////////////////////////////////////////////////////////////////////////////////////
CUTE_HOST_DEVICE
void
tma_descriptor_fence_acquire(TmaDescriptor const* desc_ptr)
{
#if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED)
uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr);
asm volatile (
"fence.proxy.tensormap::generic.acquire.gpu [%0], 128;"
:
: "l"(gmem_int_desc)
: "memory");
asm volatile (
"cvta.global.u64 %0, %0;"
:
: "l"(gmem_int_desc), "l"(gmem_int_desc)
: "memory");
#else
CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3");
#endif
}
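// Illustrative sketch (non-normative) of the device-side tensormap update protocol built
// from the helpers above. `smem_desc` is a TmaDescriptor staged in shared memory and
// `gmem_desc_ptr` is the copy that TMA instructions will actually consume.
//
//   tma_descriptor_replace_addr_in_shared_mem(smem_desc, new_tensor_ptr);
//   tma_descriptor_replace_dims_strides_in_shared_mem(smem_desc, prob_shape, prob_stride);
//   tma_descriptor_cp_fence_release(gmem_desc_ptr, smem_desc);  // copy back to GMEM + release fence
//   ...                                                         // later, before issuing TMA with it:
//   tma_descriptor_fence_acquire(gmem_desc_ptr);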
///////////////////////////////////////////////////////////////////////////////
} // end namespace cute
| include/cute/arch/copy_sm90_desc.hpp/0 | {
"file_path": "include/cute/arch/copy_sm90_desc.hpp",
"repo_id": "include",
"token_count": 6461
} | 13 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Statically sized array of elements that accommodates subbyte trivial types
           in packed storage.
*/
#pragma once
#include <cute/config.hpp>
#include <cute/numeric/numeric_types.hpp>
#include <cute/numeric/integral_constant.hpp>
namespace cute
{
//
// Underlying subbyte storage type
//
template <class T>
using subbyte_storage_type_t = conditional_t<(cute::sizeof_bits_v<T> <= 8), uint8_t,
conditional_t<(cute::sizeof_bits_v<T> <= 16), uint16_t,
conditional_t<(cute::sizeof_bits_v<T> <= 32), uint32_t,
conditional_t<(cute::sizeof_bits_v<T> <= 64), uint64_t,
conditional_t<(cute::sizeof_bits_v<T> <= 128), uint128_t,
T>>>>>;
template <class T> struct subbyte_iterator;
template <class, class> struct swizzle_ptr;
//
// subbyte_reference
// Proxy object for sub-byte element references
//
template <class T>
struct subbyte_reference
{
// Iterator Element type (const or non-const)
using element_type = T;
// Iterator Value type without type qualifier.
using value_type = remove_cv_t<T>;
// Storage type (const or non-const)
using storage_type = conditional_t<(is_const_v<T>), subbyte_storage_type_t<T> const, subbyte_storage_type_t<T>>;
static_assert(sizeof_bits_v<storage_type> % 8 == 0, "Storage type is not supported");
static_assert(sizeof_bits_v<element_type> <= sizeof_bits_v<storage_type>,
"Size of Element must not be greater than Storage.");
private:
// Bitmask for covering one item
static constexpr storage_type BitMask = storage_type(storage_type(-1) >> (sizeof_bits_v<storage_type> - sizeof_bits_v<element_type>));
// Flag for fast branching on straddled elements
static constexpr bool is_storage_unaligned = ((sizeof_bits_v<storage_type> % sizeof_bits_v<element_type>) != 0);
friend struct subbyte_iterator<T>;
// Pointer to storage element
storage_type* ptr_ = nullptr;
// Bit index of value_type starting position within storage_type element.
  // RI: 0 <= idx_ < sizeof_bits_v<storage_type>
uint8_t idx_ = 0;
// Ctor
template <class PointerType>
CUTE_HOST_DEVICE constexpr
subbyte_reference(PointerType* ptr, uint8_t idx = 0) : ptr_(reinterpret_cast<storage_type*>(ptr)), idx_(idx) {}
public:
// Copy Ctor
CUTE_HOST_DEVICE constexpr
subbyte_reference(subbyte_reference const& other) {
*this = element_type(other);
}
// Copy Assignment
CUTE_HOST_DEVICE constexpr
subbyte_reference& operator=(subbyte_reference const& other) {
return *this = element_type(other);
}
// Assignment
template <class T_ = element_type>
CUTE_HOST_DEVICE constexpr
enable_if_t<!is_const_v<T_>, subbyte_reference&> operator=(element_type x)
{
static_assert(is_same_v<T_, element_type>, "Do not specify template arguments!");
storage_type item = (reinterpret_cast<storage_type const&>(x) & BitMask);
// Update the current storage element
storage_type bit_mask_0 = storage_type(BitMask << idx_);
ptr_[0] = storage_type((ptr_[0] & ~bit_mask_0) | (item << idx_));
// If value_type is unaligned with storage_type (static) and this is a straddled value (dynamic)
if (is_storage_unaligned && idx_ + sizeof_bits_v<value_type> > sizeof_bits_v<storage_type>) {
uint8_t straddle_bits = uint8_t(sizeof_bits_v<storage_type> - idx_);
storage_type bit_mask_1 = storage_type(BitMask >> straddle_bits);
// Update the next storage element
ptr_[1] = storage_type((ptr_[1] & ~bit_mask_1) | (item >> straddle_bits));
}
return *this;
}
// Comparison of referenced values
CUTE_HOST_DEVICE constexpr friend
bool operator==(subbyte_reference const& x, subbyte_reference const& y) { return x.get() == y.get(); }
CUTE_HOST_DEVICE constexpr friend
bool operator!=(subbyte_reference const& x, subbyte_reference const& y) { return x.get() != y.get(); }
CUTE_HOST_DEVICE constexpr friend
bool operator< (subbyte_reference const& x, subbyte_reference const& y) { return x.get() < y.get(); }
CUTE_HOST_DEVICE constexpr friend
bool operator> (subbyte_reference const& x, subbyte_reference const& y) { return x.get() > y.get(); }
CUTE_HOST_DEVICE constexpr friend
bool operator<=(subbyte_reference const& x, subbyte_reference const& y) { return x.get() <= y.get(); }
CUTE_HOST_DEVICE constexpr friend
bool operator>=(subbyte_reference const& x, subbyte_reference const& y) { return x.get() >= y.get(); }
// Value
CUTE_HOST_DEVICE
element_type get() const
{
if constexpr (is_same_v<bool, value_type>) { // Extract to bool -- potentially faster impl
return bool((*ptr_) & (BitMask << idx_));
} else { // Extract to element_type
// Extract from the current storage element
auto item = storage_type((ptr_[0] >> idx_) & BitMask);
// If value_type is unaligned with storage_type (static) and this is a straddled value (dynamic)
if (is_storage_unaligned && idx_ + sizeof_bits_v<value_type> > sizeof_bits_v<storage_type>) {
uint8_t straddle_bits = uint8_t(sizeof_bits_v<storage_type> - idx_);
storage_type bit_mask_1 = storage_type(BitMask >> straddle_bits);
// Extract from the next storage element
item |= storage_type((ptr_[1] & bit_mask_1) << straddle_bits);
}
return reinterpret_cast<element_type&>(item);
}
}
// Extract to type element_type
CUTE_HOST_DEVICE constexpr
operator element_type() const {
return get();
}
// Address
subbyte_iterator<T> operator&() const {
return {ptr_, idx_};
}
};
//
// subbyte_iterator
// Random-access iterator over subbyte references
//
template <class T>
struct subbyte_iterator
{
// Iterator Element type (const or non-const)
using element_type = T;
// Iterator Value type without type qualifier.
using value_type = remove_cv_t<T>;
// Storage type (const or non-const)
using storage_type = conditional_t<(is_const_v<T>), subbyte_storage_type_t<T> const, subbyte_storage_type_t<T>>;
// Reference proxy type
using reference = subbyte_reference<element_type>;
static_assert(sizeof_bits_v<storage_type> % 8 == 0, "Storage type is not supported");
static_assert(sizeof_bits_v<element_type> <= sizeof_bits_v<storage_type>,
"Size of Element must not be greater than Storage.");
private:
template <class, class> friend struct swizzle_ptr;
template <class U> friend CUTE_HOST_DEVICE constexpr U* raw_pointer_cast(subbyte_iterator<U> const&);
template <class N, class U> friend CUTE_HOST_DEVICE constexpr auto recast_ptr(subbyte_iterator<U> const&);
template <class U> friend CUTE_HOST_DEVICE void print(subbyte_iterator<U> const&);
// Pointer to storage element
storage_type* ptr_;
// Bit index of value_type starting position within storage_type element.
  // RI: 0 <= idx_ < sizeof_bits_v<storage_type>
uint8_t idx_;
public:
// Default Ctor
CUTE_HOST_DEVICE constexpr
subbyte_iterator() : ptr_{nullptr}, idx_{0} {};
// Ctor
template <class PointerType>
CUTE_HOST_DEVICE constexpr
subbyte_iterator(PointerType* ptr, uint8_t idx = 0) : ptr_(reinterpret_cast<storage_type*>(ptr)), idx_(idx) { }
CUTE_HOST_DEVICE constexpr
reference operator*() const {
return reference(ptr_, idx_);
}
CUTE_HOST_DEVICE constexpr
subbyte_iterator& operator+=(uint64_t k) {
k = sizeof_bits_v<value_type> * k + idx_;
ptr_ += k / sizeof_bits_v<storage_type>;
idx_ = k % sizeof_bits_v<storage_type>;
return *this;
}
CUTE_HOST_DEVICE constexpr
subbyte_iterator operator+(uint64_t k) const {
return subbyte_iterator(ptr_, idx_) += k;
}
CUTE_HOST_DEVICE constexpr
reference operator[](uint64_t k) const {
return *(*this + k);
}
CUTE_HOST_DEVICE constexpr
subbyte_iterator& operator++() {
idx_ += sizeof_bits_v<value_type>;
if (idx_ >= sizeof_bits_v<storage_type>) {
++ptr_;
idx_ -= sizeof_bits_v<storage_type>;
}
return *this;
}
CUTE_HOST_DEVICE constexpr
subbyte_iterator operator++(int) {
subbyte_iterator ret(*this);
++(*this);
return ret;
}
CUTE_HOST_DEVICE constexpr
subbyte_iterator& operator--() {
if (idx_ >= sizeof_bits_v<value_type>) {
idx_ -= sizeof_bits_v<value_type>;
} else {
--ptr_;
idx_ += sizeof_bits_v<storage_type> - sizeof_bits_v<value_type>;
}
return *this;
}
CUTE_HOST_DEVICE constexpr
subbyte_iterator operator--(int) {
subbyte_iterator ret(*this);
--(*this);
return ret;
}
CUTE_HOST_DEVICE constexpr friend
bool operator==(subbyte_iterator const& x, subbyte_iterator const& y) {
return x.ptr_ == y.ptr_ && x.idx_ == y.idx_;
}
CUTE_HOST_DEVICE constexpr friend
bool operator!=(subbyte_iterator const& x, subbyte_iterator const& y) { return !(x == y); }
CUTE_HOST_DEVICE constexpr friend
bool operator< (subbyte_iterator const& x, subbyte_iterator const& y) {
return x.ptr_ < y.ptr_ || (x.ptr_ == y.ptr_ && x.idx_ < y.idx_);
}
CUTE_HOST_DEVICE constexpr friend
bool operator<=(subbyte_iterator const& x, subbyte_iterator const& y) { return !(y < x); }
CUTE_HOST_DEVICE constexpr friend
bool operator> (subbyte_iterator const& x, subbyte_iterator const& y) { return (y < x); }
CUTE_HOST_DEVICE constexpr friend
bool operator>=(subbyte_iterator const& x, subbyte_iterator const& y) { return !(x < y); }
};
// Conversion to raw pointer with loss of subbyte index
template <class T>
CUTE_HOST_DEVICE constexpr
T*
raw_pointer_cast(subbyte_iterator<T> const& x) {
assert(x.idx_ == 0);
return reinterpret_cast<T*>(x.ptr_);
}
// Conversion to NewT_ with possible loss of subbyte index
template <class NewT_, class T>
CUTE_HOST_DEVICE constexpr
auto
recast_ptr(subbyte_iterator<T> const& x) {
using NewT = conditional_t<(is_const_v<T>), NewT_ const, NewT_>;
if constexpr (cute::is_subbyte_v<NewT>) { // Making subbyte_iter, preserve the subbyte idx
return subbyte_iterator<NewT>(x.ptr_, x.idx_);
} else { // Not subbyte, assume/assert subbyte idx 0
return reinterpret_cast<NewT*>(raw_pointer_cast(x));
}
CUTE_GCC_UNREACHABLE;
}
template <class T>
CUTE_HOST_DEVICE void
print(subbyte_iterator<T> const& x) {
printf("subptr[%db](%p.%u)", int(sizeof_bits_v<T>), x.ptr_, x.idx_);
}
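// Illustrative sketch (non-normative): a subbyte_iterator over 4-bit elements packed
// into byte storage. Dereferencing yields a subbyte_reference proxy, so reads and
// writes touch only the addressed bits.
//
//   uint8_t storage[4] = {};               // room for 8 packed int4b_t values
//   subbyte_iterator<int4b_t> it(storage);
//   it[3] = int4b_t(-2);                   // writes 4 bits at overall bit-offset 12
//   int4b_t v = it[3];                     // reads back through the proxy
//   print(it + 3);                         // raw storage pointer plus in-word bit index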
//
// array_subbyte
// Statically sized array for non-byte-aligned data types
//
template <class T, size_t N>
struct array_subbyte
{
using element_type = T;
using value_type = remove_cv_t<T>;
using pointer = element_type*;
using const_pointer = element_type const*;
using size_type = size_t;
using difference_type = ptrdiff_t;
//
// References
//
using reference = subbyte_reference<element_type>;
using const_reference = subbyte_reference<element_type const>;
//
// Iterators
//
using iterator = subbyte_iterator<element_type>;
using const_iterator = subbyte_iterator<element_type const>;
// Storage type (const or non-const)
using storage_type = conditional_t<(is_const_v<T>), subbyte_storage_type_t<T> const, subbyte_storage_type_t<T>>;
static_assert(sizeof_bits_v<storage_type> % 8 == 0, "Storage type is not supported");
private:
// Number of storage elements, ceil_div
static constexpr size_type StorageElements = (N * sizeof_bits_v<value_type> + sizeof_bits_v<storage_type> - 1) / sizeof_bits_v<storage_type>;
// Internal storage
storage_type storage[StorageElements];
public:
CUTE_HOST_DEVICE constexpr
size_type size() const {
return N;
}
CUTE_HOST_DEVICE constexpr
size_type max_size() const {
return N;
}
CUTE_HOST_DEVICE constexpr
bool empty() const {
return !N;
}
// Efficient clear method
CUTE_HOST_DEVICE constexpr
void clear() {
CUTE_UNROLL
for (size_type i = 0; i < StorageElements; ++i) {
storage[i] = storage_type(0);
}
}
CUTE_HOST_DEVICE constexpr
void fill(T const& value) {
CUTE_UNROLL
for (size_type i = 0; i < N; ++i) {
at(i) = value;
}
}
CUTE_HOST_DEVICE constexpr
reference at(size_type pos) {
return iterator(storage)[pos];
}
CUTE_HOST_DEVICE constexpr
const_reference at(size_type pos) const {
return const_iterator(storage)[pos];
}
CUTE_HOST_DEVICE constexpr
reference operator[](size_type pos) {
return at(pos);
}
CUTE_HOST_DEVICE constexpr
const_reference operator[](size_type pos) const {
return at(pos);
}
CUTE_HOST_DEVICE constexpr
reference front() {
return at(0);
}
CUTE_HOST_DEVICE constexpr
const_reference front() const {
return at(0);
}
CUTE_HOST_DEVICE constexpr
reference back() {
return at(N-1);
}
CUTE_HOST_DEVICE constexpr
const_reference back() const {
return at(N-1);
}
// In analogy to std::vector<bool>::data(), these functions are deleted to prevent bugs.
// Instead, prefer
// auto* data = raw_pointer_cast(my_subbyte_array.begin());
// where the type of auto* is implementation-defined and
// with the knowledge that [data, data + my_subbyte_array.size()) may not be a valid range.
CUTE_HOST_DEVICE constexpr
pointer data() = delete;
CUTE_HOST_DEVICE constexpr
const_pointer data() const = delete;
CUTE_HOST_DEVICE constexpr
iterator begin() {
return iterator(storage);
}
CUTE_HOST_DEVICE constexpr
const_iterator begin() const {
return const_iterator(storage);
}
CUTE_HOST_DEVICE constexpr
const_iterator cbegin() const {
return begin();
}
CUTE_HOST_DEVICE constexpr
iterator end() {
return iterator(storage) + N;
}
CUTE_HOST_DEVICE constexpr
const_iterator end() const {
return const_iterator(storage) + N;
}
CUTE_HOST_DEVICE constexpr
const_iterator cend() const {
return end();
}
//
// Comparison operators
//
};
//
// Operators
//
template <class T, size_t N>
CUTE_HOST_DEVICE constexpr
void clear(array_subbyte<T,N>& a)
{
a.clear();
}
template <class T, size_t N>
CUTE_HOST_DEVICE constexpr
void fill(array_subbyte<T,N>& a, T const& value)
{
a.fill(value);
}
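// Illustrative usage sketch (non-normative), assuming 4-bit elements:
//
//   array_subbyte<int4b_t, 10> a;              // packed into 5 bytes of storage
//   fill(a, int4b_t(1));                       // element-wise fill through reference proxies
//   clear(a);                                  // zeroes whole storage words at once
//   auto* p = raw_pointer_cast(a.begin());     // raw pointer; data() is intentionally deleted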
} // namespace cute
//
// Specialize tuple-related functionality for cute::array_subbyte
//
#if defined(__CUDACC_RTC__)
#include <cuda/std/tuple>
#else
#include <tuple>
#endif
namespace cute
{
template <size_t I, class T, size_t N>
CUTE_HOST_DEVICE constexpr
T& get(array_subbyte<T,N>& a)
{
static_assert(I < N, "Index out of range");
return a[I];
}
template <size_t I, class T, size_t N>
CUTE_HOST_DEVICE constexpr
T const& get(array_subbyte<T,N> const& a)
{
static_assert(I < N, "Index out of range");
return a[I];
}
template <size_t I, class T, size_t N>
CUTE_HOST_DEVICE constexpr
T&& get(array_subbyte<T,N>&& a)
{
static_assert(I < N, "Index out of range");
return cute::move(a[I]);
}
} // end namespace cute
namespace CUTE_STL_NAMESPACE
{
template <class T>
struct is_reference<cute::subbyte_reference<T>>
: CUTE_STL_NAMESPACE::true_type
{};
template <class T, size_t N>
struct tuple_size<cute::array_subbyte<T,N>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, N>
{};
template <size_t I, class T, size_t N>
struct tuple_element<I, cute::array_subbyte<T,N>>
{
using type = T;
};
template <class T, size_t N>
struct tuple_size<const cute::array_subbyte<T,N>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, N>
{};
template <size_t I, class T, size_t N>
struct tuple_element<I, const cute::array_subbyte<T,N>>
{
using type = T;
};
} // end namespace CUTE_STL_NAMESPACE
#ifdef CUTE_STL_NAMESPACE_IS_CUDA_STD
namespace std
{
#if defined(__CUDACC_RTC__)
template <class... _Tp>
struct tuple_size;
template <size_t _Ip, class... _Tp>
struct tuple_element;
#endif
template <class T, size_t N>
struct tuple_size<cute::array_subbyte<T,N>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, N>
{};
template <size_t I, class T, size_t N>
struct tuple_element<I, cute::array_subbyte<T,N>>
{
using type = T;
};
template <class T, size_t N>
struct tuple_size<const cute::array_subbyte<T,N>>
: CUTE_STL_NAMESPACE::integral_constant<size_t, N>
{};
template <size_t I, class T, size_t N>
struct tuple_element<I, const cute::array_subbyte<T,N>>
{
using type = T;
};
} // end namespace std
#endif // CUTE_STL_NAMESPACE_IS_CUDA_STD
| include/cute/container/array_subbyte.hpp/0 | {
"file_path": "include/cute/container/array_subbyte.hpp",
"repo_id": "include",
"token_count": 6855
} | 14 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <vector_types.h>
#include <cutlass/numeric_types.h>
#include <cutlass/numeric_size.h>
#include <cute/numeric/int.hpp>
#include <cute/numeric/real.hpp>
namespace cute {
template <typename T>
struct sizeof_bits : public cutlass::sizeof_bits<T> {};
// DO NOT change auto to int: sizeof_bits<sparse_elem> uses integral_ratio instead of int
template <class T>
static constexpr auto sizeof_bits_v = sizeof_bits<T>::value;
using cutlass::bits_to_bytes;
using cutlass::is_subbyte;
template <class T>
static constexpr auto is_subbyte_v = is_subbyte<T>::value;
using cutlass::half_t;
using cutlass::bfloat16_t;
using cutlass::tfloat32_t;
// Umbrella floating-point 8-bit data type : type_erased_dynamic_float8_t
// This umbrella datatype can be enabled when a user provides a specific
// datatype in the runtime argument list.
using cutlass::type_erased_dynamic_float8_t;
using cutlass::float_e4m3_t;
using cutlass::float_e5m2_t;
using cutlass::uint1b_t;
using cutlass::int2b_t;
using cutlass::uint2b_t;
using cutlass::int4b_t;
using cutlass::uint4b_t;
using cutlass::bin1_t;
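// Illustrative examples (non-normative) of the re-exported traits:
//
//   static_assert(sizeof_bits_v<half_t> == 16);
//   static_assert(sizeof_bits_v<int4b_t> == 4);
//   static_assert(is_subbyte_v<int4b_t> && !is_subbyte_v<uint8_t>);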
} // end namespace cute
| include/cute/numeric/numeric_types.hpp/0 | {
"file_path": "include/cute/numeric/numeric_types.hpp",
"repo_id": "include",
"token_count": 877
} | 15 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Sparse matrix multiply accumulate for SM80
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "mma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#if ((__CUDACC_VER_MAJOR__ > 11) || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))
#define CUTLASS_ARCH_SPARSE_MMA_SM80_SUPPORTED 1
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
#define CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED
#endif
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16832
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F16 = F16 * F16 + F16
template <>
struct SparseMma<
gemm::GemmShape<16, 8, 32>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
half_t,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread
> {
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 8>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 8>;
using ElementC = half_t;
using LayoutC = layout::RowMajor;
using FragmentC = Array<half_t, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 2;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c, uint32_t const &E, int const id2) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
uint32_t const *C = reinterpret_cast<uint32_t const *>(&c);
uint32_t *D = reinterpret_cast<uint32_t *>(&d);
#if ((__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 5))
if (id2 == 0) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k32.row.col.f16.f16.f16.f16 {%0,%1}, "
"{%2,%3,%4,%5}, {%6,%7,%8,%9}, {%10,%11}, %12, 0x0;\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(E));
}
else if (id2 == 1) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k32.row.col.f16.f16.f16.f16 {%0,%1}, "
"{%2,%3,%4,%5}, {%6,%7,%8,%9}, {%10,%11}, %12, 0x1;\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(E));
}
else {
assert(0);
}
#else
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f16.f16.f16.f16 {%0,%1}, "
"{%2,%3,%4,%5}, {%6,%7,%8,%9}, {%10,%11}, %12, 0x0;\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(E));
}
else if (id2 == 1) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f16.f16.f16.f16 {%0,%1}, "
"{%2,%3,%4,%5}, {%6,%7,%8,%9}, {%10,%11}, %12, 0x1;\n"
: "=r"(D[0]), "=r"(D[1])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "r"(C[0]), "r"(C[1]), "r"(E));
}
else {
assert(0);
}
#endif
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
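// Illustrative per-thread usage sketch (non-normative). Fragment contents and the
// metadata word E are assumed to have been produced elsewhere (e.g. by the sparse
// warp-level MMA iterators); id2 is the sparsity selector forwarded to PTX.
//
//   using Mma = SparseMma<gemm::GemmShape<16, 8, 32>, 32,
//                         half_t, layout::RowMajor,
//                         half_t, layout::ColumnMajor,
//                         half_t, layout::RowMajor,
//                         OpMultiplyAdd, SPFormatType::Thread>;
//   Mma mma;
//   Mma::FragmentA a;        // 8 half_t: this thread's share of the 2:4-compressed A operand
//   Mma::FragmentB b;        // 8 half_t
//   Mma::FragmentC acc{};    // 4 half_t accumulators
//   uint32_t e = 0;          // packed metadata indices (kMetaSizeInBits bits each)
//   mma(acc, a, b, acc, e, /*id2=*/0);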
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = F16 * F16 + F32
template <>
struct SparseMma<
gemm::GemmShape<16, 8, 32>,
32,
half_t,
layout::RowMajor,
half_t,
layout::ColumnMajor,
float,
layout::RowMajor,
OpMultiplyAdd,
SPFormatType::Thread
> {
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = half_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<half_t, 8>;
using ElementB = half_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<half_t, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 2;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c, uint32_t const &E, int const id2) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
#if ((__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 5))
if (id2 == 0) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k32.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]),
"r"(E));
}
else if (id2 == 1) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k32.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]),
"r"(E));
}
else {
assert(0);
}
#else
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]),
"r"(E));
}
else if (id2 == 1) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f32.f16.f16.f32 {%0,%1,%2,%3}, "
"{%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]),
"r"(B[2]), "r"(B[3]), "f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]),
"r"(E));
}
else {
assert(0);
}
#endif
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16832 - Float BF16, FP32 accumulation
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = bf16 * bf16 + F32
template <>
struct SparseMma<gemm::GemmShape<16, 8, 32>, 32, bfloat16_t, layout::RowMajor,
bfloat16_t, layout::ColumnMajor, float, layout::RowMajor,
OpMultiplyAdd, SPFormatType::Thread> {
using Shape = gemm::GemmShape<16, 8, 32>;
using ElementA = bfloat16_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<bfloat16_t, 8>;
using ElementB = bfloat16_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<bfloat16_t, 8>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 2;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c, uint32_t const &E, int const id2) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
#if ((__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 5))
if (id2 == 0) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k32.row.col.f32.bf16.bf16.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else if (id2 == 1) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k32.row.col.f32.bf16.bf16.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else {
assert(0);
}
#else
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f32.bf16.bf16.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else if (id2 == 1) {
asm volatile(
"mma.sp.sync.aligned.m16n8k32.row.col.f32.bf16.bf16.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else {
assert(0);
}
#endif
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16816 - Float TF32
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: F32 = tf32 * tf32 + F32
template <>
struct SparseMma<gemm::GemmShape<16, 8, 16>, 32, tfloat32_t, layout::RowMajor,
tfloat32_t, layout::ColumnMajor, float, layout::RowMajor,
OpMultiplyAdd, SPFormatType::Thread> {
using Shape = gemm::GemmShape<16, 8, 16>;
using ElementA = tfloat32_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<tfloat32_t, 4>;
using ElementB = tfloat32_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<tfloat32_t, 4>;
using ElementC = float;
using LayoutC = layout::RowMajor;
using FragmentC = Array<float, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAdd;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 4;
static int const kMaxID2 = 2;
CUTLASS_HOST_DEVICE
void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b,
FragmentC const &c, uint32_t const &E, int const id2) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
float const *C = reinterpret_cast<float const *>(&c);
float *D = reinterpret_cast<float *>(&d);
#if ((__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 5))
if (id2 == 0) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k16.row.col.f32.tf32.tf32.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else if (id2 == 1) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k16.row.col.f32.tf32.tf32.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else {
assert(0);
}
#else
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k16.row.col.f32.tf32.tf32.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else if (id2 == 1) {
asm volatile(
"mma.sp.sync.aligned.m16n8k16.row.col.f32.tf32.tf32.f32 "
"{%0,%1,%2,%3}, {%4,%5,%6,%7}, {%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x1;\n"
: "=f"(D[0]), "=f"(D[1]), "=f"(D[2]), "=f"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"f"(C[0]), "f"(C[1]), "f"(C[2]), "f"(C[3]), "r"(E));
} else {
assert(0);
}
#endif
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 16864 - S8 input, S32 accumulation - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S8 * S8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
int8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
#if ((__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 5))
if (id2 == 0) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k64.row.col.s32.s8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#else
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#endif
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S8 * U8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
int8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = int8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<int8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
#if ((__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 5))
if (id2 == 0) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k64.row.col.s32.s8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#else
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.s8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#endif
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * S8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
uint8_t,
layout::RowMajor,
int8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = int8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<int8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
#if ((__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 5))
if (id2 == 0) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k64.row.col.s32.u8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#else
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.s8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#endif
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U8 * U8 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,64>,
32,
uint8_t,
layout::RowMajor,
uint8_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,64>;
using ElementA = uint8_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<uint8_t, 16>;
using ElementB = uint8_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<uint8_t, 16>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
#if ((__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 5))
if (id2 == 0) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k64.row.col.s32.u8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#else
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k64.row.col.s32.u8.u8.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#endif
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
////////////////////////////////////////////////////////////////////////////////
//
// Sparse Matrix Multiply 168128 - S4 input, S32 accumulation - SATURATE
//
////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation: S32 = S4 * S4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
#if ((__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 5))
if (id2 == 0) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k128.row.col.s32.s4.s4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#else
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.s4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#endif
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = S4 * U4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::int4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::int4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::int4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
#if ((__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 5))
if (id2 == 0) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k128.row.col.s32.s4.u4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#else
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.s4.u4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#endif
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * S4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::int4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::int4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::int4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
#if ((__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 5))
if (id2 == 0) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k128.row.col.s32.u4.s4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#else
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.s4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#endif
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
/// Matrix multiply-add operation: S32 = U4 * U4 + S32
template <>
struct SparseMma<
gemm::GemmShape<16,8,128>,
32,
cutlass::uint4b_t,
layout::RowMajor,
cutlass::uint4b_t,
layout::ColumnMajor,
int,
layout::RowMajor,
OpMultiplyAddSaturate,
SPFormatType::Thread> {
using Shape = gemm::GemmShape<16,8,128>;
using ElementA = cutlass::uint4b_t;
using LayoutA = layout::RowMajor;
using FragmentA = Array<cutlass::uint4b_t, 32>;
using ElementB = cutlass::uint4b_t;
using LayoutB = layout::ColumnMajor;
using FragmentB = Array<cutlass::uint4b_t, 32>;
using ElementC = int;
using LayoutC = layout::RowMajor;
using FragmentC = Array<int, 4>;
using FragmentE = uint32_t;
using Operator = OpMultiplyAddSaturate;
using ArchTag = arch::Sm80;
static int const kSparse = 2;
static int const kMetaSizeInBits = 2;
static int const kMaxID2 = 1;
/// Computes multiply-add
CUTLASS_HOST_DEVICE
void operator()(
FragmentC &d,
FragmentA const &a,
FragmentB const &b,
FragmentC const &c,
uint32_t const &E,
int const id2
) const {
#if defined(CUTLASS_ARCH_SPARSE_MMA_SM80_ENABLED)
uint32_t const *A = reinterpret_cast<uint32_t const *>(&a);
uint32_t const *B = reinterpret_cast<uint32_t const *>(&b);
int const *C = reinterpret_cast<int const *>(&c);
int *D = reinterpret_cast<int *>(&d);
#if ((__CUDACC_VER_MAJOR__ > 12) || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ >= 5))
if (id2 == 0) {
asm volatile(
"mma.sp::ordered_metadata.sync.aligned.m16n8k128.row.col.s32.u4.u4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#else
if (id2 == 0) {
asm volatile(
"mma.sp.sync.aligned.m16n8k128.row.col.s32.u4.u4.s32.satfinite {%0,%1,%2,%3}, {%4,%5,%6,%7}, "
"{%8,%9,%10,%11}, {%12,%13,%14,%15}, %16, 0x0;\n"
: "=r"(D[0]), "=r"(D[1]), "=r"(D[2]), "=r"(D[3])
: "r"(A[0]), "r"(A[1]), "r"(A[2]), "r"(A[3]), "r"(B[0]), "r"(B[1]), "r"(B[2]), "r"(B[3]),
"r"(C[0]), "r"(C[1]), "r"(C[2]), "r"(C[3]), "r"(E));
} else {
assert(0);
}
#endif
#else
CUTLASS_UNUSED(a);
CUTLASS_UNUSED(b);
CUTLASS_UNUSED(c);
CUTLASS_UNUSED(d);
assert(0);
#endif
}
};
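// A minimal usage sketch (illustrative only): the specializations above are normally
// driven by warp- and threadblock-level tiles, but a direct thread-level call follows
// this pattern. Because of the 2:4 structured-sparse encoding (kSparse == 2), operand A
// carries half of the logical K extent, and the 2-bit metadata is packed into E.
//
//   using SparseMmaOp = cutlass::arch::SparseMma<
//       cutlass::gemm::GemmShape<16, 8, 128>, 32,
//       cutlass::uint4b_t, cutlass::layout::RowMajor,
//       cutlass::uint4b_t, cutlass::layout::ColumnMajor,
//       int, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAddSaturate,
//       cutlass::arch::SPFormatType::Thread>;
//
//   SparseMmaOp mma;
//   SparseMmaOp::FragmentA frag_A;   // Array<uint4b_t, 32> per thread
//   SparseMmaOp::FragmentB frag_B;   // Array<uint4b_t, 32> per thread
//   SparseMmaOp::FragmentC accum;    // Array<int, 4> per thread
//   uint32_t meta_E = 0;             // sparsity metadata owned by this thread
//   mma(accum, frag_A, frag_B, accum, meta_E, /*id2=*/0);  // id2 must be 0 since kMaxID2 == 1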
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/arch/mma_sparse_sm80.h/0 | {
"file_path": "include/cutlass/arch/mma_sparse_sm80.h",
"repo_id": "include",
"token_count": 18650
} | 16 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumerated type describing the type of kernel (based on input or output matrices).
enum class BlasMode {
kGemm,
kSymmetric,
kHermitian,
kTriangular,
kInvalid
};
/// Enumerated type describing the fill mode for matrices for BLAS functions.
enum class FillMode {
  kFull,            /// The entire tensor is covered.
  kLower,           /// The 'lower' part of a tensor is covered including the diagonal.
  kUpper,           /// The 'upper' part of a tensor is covered including the diagonal.
  kDiagonal,        /// Only diagonal elements are covered.
  kNone,            /// No element is covered.
kInvalid
};
/// Enumerated type describing the diagonal property of matrices for BLAS functions.
enum class DiagType {
kNonUnit,
kUnit,
kZero, // Only used internally for computing SYMM/HEMM
kInvalid
};
/// Enumerated type describing which side the dense matrix occupies in the matrix equation for BLAS functions.
enum class SideMode {
kLeft,
kRight,
kInvalid
};
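// Illustrative sketch: these enumerations typically travel with host-side problem
// descriptions and are switched on when validating or selecting a BLAS3 kernel.
// The helper below is hypothetical and only demonstrates the intended usage.
//
//   inline bool is_lower_triangular_left_solve(BlasMode mode, FillMode fill, SideMode side) {
//     return mode == BlasMode::kTriangular &&
//            fill == FillMode::kLower &&
//            side == SideMode::kLeft;
//   }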
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/blas3_types.h/0 | {
"file_path": "include/cutlass/blas3_types.h",
"repo_id": "include",
"token_count": 835
} | 17 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for device-level Depthwise Convolution
*/
#pragma once
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/device_kernel.h"
#include "cutlass/conv/convolution.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template<typename DirectConvolutionKernel_>
class DirectConvolution {
public:
using UnderlyingKernel = DirectConvolutionKernel_;
using ElementA = typename UnderlyingKernel::ElementA;
using LayoutA = typename UnderlyingKernel::LayoutA;
using ElementB = typename UnderlyingKernel::ElementB;
using LayoutB = typename UnderlyingKernel::LayoutB;
using ElementC = typename UnderlyingKernel::ElementC;
using LayoutC = typename UnderlyingKernel::LayoutC;
using ElementAccumulator = typename UnderlyingKernel::ElementAccumulator;
using ElementCompute = typename UnderlyingKernel::ElementCompute;
using OperatorClass = typename UnderlyingKernel::OperatorClass;
using ArchTag = typename UnderlyingKernel::ArchTag;
using ThreadblockShape = typename UnderlyingKernel::ThreadblockShape;
using WarpShape = typename UnderlyingKernel::WarpShape;
using InstructionShape = typename UnderlyingKernel::InstructionShape;
using ThreadblockSwizzle = typename UnderlyingKernel::ThreadblockSwizzle;
using EpilogueOutputOp = typename UnderlyingKernel::EpilogueOutputOp;
static int const kStages = UnderlyingKernel::kStages;
static int const kConvDim = UnderlyingKernel::kConvDim;
using WarpMmaOperator = typename UnderlyingKernel::WarpMmaOperator;
using ArchMmaOperator = typename UnderlyingKernel::ArchMmaOperator;
using MathOperator = typename UnderlyingKernel::MathOperator;
static cutlass::conv::Operator const kConvolutionalOperator = UnderlyingKernel::kConvolutionalOperator;
static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = UnderlyingKernel::kIteratorAlgorithm;
static cutlass::conv::StrideSupport const kStrideSupport = UnderlyingKernel::kStrideSupport;
static cutlass::conv::GroupMode const kGroupMode = UnderlyingKernel::kGroupMode;
static int const kWarpCount =
(ThreadblockShape::kM / WarpShape::kM) *
(ThreadblockShape::kN / WarpShape::kN) *
(ThreadblockShape::kK / WarpShape::kK);
/// Argument structure
using Arguments = typename UnderlyingKernel::Arguments;
using ReorderKernel = typename UnderlyingKernel::ReorderKernel;
private:
/// Kernel parameters object
typename UnderlyingKernel::Params params_;
public:
  /// Constructs the DirectConvolution operator
DirectConvolution() { }
  /// Determines whether the direct convolution can execute the given problem.
static Status can_implement(Arguments const &args) {
// dispatch to iterators
Status status = UnderlyingKernel::Mma::IteratorA::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
status = UnderlyingKernel::Mma::IteratorB::can_implement(args.problem_size);
if (Status::kSuccess != status) {
return status;
}
if (kGroupMode != conv::GroupMode::kDepthwise) {
return Status::kErrorInvalidProblem;
}
    // At least one of C and K must be equal to the number of groups
if (args.problem_size.K != args.problem_size.groups &&
args.problem_size.C != args.problem_size.groups) {
return Status::kErrorInvalidProblem;
}
static int const kAlignmentC = UnderlyingKernel::Epilogue::OutputTileIterator::kElementsPerAccess;
if (kConvolutionalOperator == conv::Operator::kFprop) {
if (args.problem_size.K % kAlignmentC)
return Status::kErrorMisalignedOperand;
} else if (kConvolutionalOperator == conv::Operator::kDgrad) {
if (args.problem_size.C % kAlignmentC)
return Status::kErrorMisalignedOperand;
} else if (kConvolutionalOperator == conv::Operator::kWgrad) {
if (args.problem_size.C % kAlignmentC)
return Status::kErrorMisalignedOperand;
}
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(
threadblock_swizzle.get_tiled_shape(
kConvolutionalOperator,
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.problem_size.split_k_slices));
if (!(grid.y <= std::numeric_limits<uint16_t>::max() &&
grid.z <= std::numeric_limits<uint16_t>::max())) {
return Status::kErrorInvalidProblem;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return 0;
}
/// Initializes GEMM state from arguments.
Status initialize(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
// initialize the params structure from the arguments
params_ = typename UnderlyingKernel::Params(
args,
static_cast<int *>(workspace)
);
int smem_size = int(sizeof(typename UnderlyingKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<UnderlyingKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
  /// Updates GEMM state from new arguments.
Status update(Arguments const &args, void *workspace = nullptr) {
// update the params structure from the arguments
params_.ptr_A = args.ref_A.data();
params_.ptr_B = args.ref_B.data();
params_.ptr_C = args.ref_C.data();
params_.ptr_D = args.ref_D.data();
params_.output_op = args.output_op;
params_.ptr_reordered_B = args.ref_reordered_B.data();
params_.semaphore = static_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
// Launch reorder kernel
if (params_.ptr_reordered_B != nullptr) {
dim3 grid = ReorderKernel::get_grid_shape(params_);
dim3 block = ReorderKernel::get_block_shape();
cutlass::Kernel<ReorderKernel><<<grid, block, 0, stream>>>(params_);
}
// Launch main kernel
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(32 * kWarpCount, 1, 1);
// Dynamic SMEM size based on input params.
int smem_size = int(params_.get_smem_size());
// Make sure we can use that much shared memory.
cudaError_t status =
cudaFuncSetAttribute(cutlass::Kernel<UnderlyingKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);
if (status != cudaSuccess)
return Status::kErrorInternal;
cutlass::Kernel<UnderlyingKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
int get_smem_size() { return int(params_.get_smem_size()); }
};
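// Illustrative host-side usage sketch (the exact Arguments members come from the
// underlying depthwise kernel; 'DepthwiseKernel', 'args', and 'stream' are assumed
// to be defined by the caller):
//
//   DirectConvolution<DepthwiseKernel> conv_op;
//   Status status = DirectConvolution<DepthwiseKernel>::can_implement(args);
//   if (status == Status::kSuccess) {
//     // operator() runs initialize() followed by run(), launching the filter
//     // reorder kernel first when a reordered filter tensor is provided.
//     status = conv_op(args, /*workspace=*/nullptr, stream);
//   }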
/////////////////////////////////////////////////////////////////////////////////////////////////
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/device/direct_convolution.h/0 | {
"file_path": "include/cutlass/conv/device/direct_convolution.h",
"repo_id": "include",
"token_count": 3159
} | 18 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv3d_fprop_filter_tile_access_iterator_analytic.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv3dFprop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
conv::StrideSupport StrideSupport = StrideSupport::kUnity
> struct DefaultConv3dFprop;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv3dFprop specialization for Analytic Iterator Algorithm
/// and 2 stage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport
>
struct DefaultConv3dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogue<
ArchTag,
ThreadblockShape,
WarpMmaTensorOp,
1,
EpilogueOutputOp
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv3dProblemSize
>;
};
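// Illustrative sketch of how a DefaultConv3dFprop specialization is typically consumed:
// the nested ::Kernel type is extracted and wrapped by the device-level
// ImplicitGemmConvolution operator. The element types, tile shapes, epilogue, and
// swizzle below are example choices, not requirements.
//
//   using Conv3dFpropKernel = typename cutlass::conv::kernel::DefaultConv3dFprop<
//       cutlass::half_t, cutlass::layout::TensorNDHWC,
//       cutlass::half_t, cutlass::layout::TensorNDHWC,
//       cutlass::half_t, cutlass::layout::TensorNDHWC,
//       float,
//       cutlass::arch::OpClassTensorOp,
//       cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 32>,
//       cutlass::gemm::GemmShape<64, 64, 32>,
//       cutlass::gemm::GemmShape<16, 8, 16>,
//       cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,
//       cutlass::arch::OpMultiplyAdd>::Kernel;
//
//   using Conv3dFprop = cutlass::conv::device::ImplicitGemmConvolution<Conv3dFpropKernel>;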
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv3dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport
>
struct DefaultConv3dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Global,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
1,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv3dProblemSize
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv3dFprop specialization for Optimized Iterator Algorithm
/// and 2 stage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport
>
struct DefaultConv3dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogue<
ArchTag,
ThreadblockShape,
WarpMmaTensorOp,
1,
EpilogueOutputOp
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv3dProblemSize
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv3dFprop specialization for Optimized IteratorAlgorithm and multistage
/// pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport
>
struct DefaultConv3dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Global,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
1,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
5
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv3dProblemSize
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv3dFprop specialization for Analytic IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport
>
struct DefaultConv3dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
5
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv3dProblemSize
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv3dFprop specialization for Optimized IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport
>
struct DefaultConv3dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
5
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv3dProblemSize
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv3dFprop specialization for Analytic IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport
>
struct DefaultConv3dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
ThreadMapB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
5
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv3dProblemSize
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv3dFprop specialization for Optimized IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport
>
struct DefaultConv3dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv3dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv3dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
5
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop,
Conv3dProblemSize
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_conv3d_fprop.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_conv3d_fprop.h",
"repo_id": "include",
"token_count": 9263
} | 19 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for depthwise convolution
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/thread/mma.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// MMA operation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Number of threads participating
int kThreads_,
/// Data type of A elements
typename ElementA,
/// Data type of B elements
typename ElementB,
/// Element type of C matrix
typename ElementC,
/// Inner product operator
typename Operator
>
struct ElementwiseInnerProduct;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// General implementation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Data type of B elements
typename ElementB_,
/// Element type of C matrix
typename ElementC_>
struct ElementwiseInnerProduct<Shape_, 1, ElementA_, ElementB_, ElementC_, arch::OpMultiplyAdd> {
using Shape = Shape_;
using Operator = arch::OpMultiplyAdd;
using ElementC = ElementC_;
CUTLASS_HOST_DEVICE
void operator()(Array<ElementC_, Shape::kN> &d,
Array<ElementA_, Shape::kN> const &a,
Array<ElementB_, Shape::kN> const &b,
Array<ElementC_, Shape::kN> const &c) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Shape::kN; ++i) {
d[i] = a[i] * b[i] + c[i];
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for half_t
template <>
struct ElementwiseInnerProduct<
gemm::GemmShape<2, 2, 1>,
1,
half_t,
half_t,
half_t,
arch::OpMultiplyAdd> {
using Shape = gemm::GemmShape<2, 2, 1>;
using Operator = arch::OpMultiplyAdd;
using ElementC = half_t;
CUTLASS_HOST_DEVICE
void operator()(
Array<half_t, 2> &d,
Array<half_t, 2> const &a,
Array<half_t, 2> const &b,
Array<half_t, 2> const &c
) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600))
__half2 const & A = reinterpret_cast<__half2 const &>(a);
__half2 const & B = reinterpret_cast<__half2 const &>(b);
__half2 const & C = reinterpret_cast<__half2 const &>(c);
__half2 tmp_D = __hfma2(A, B, C);
d = reinterpret_cast<Array<half_t, 2> const &>(tmp_D);
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
d[i] = a[i] * b[i] + c[i];
}
#endif
}
};
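// Illustrative sketch: the specialization above packs both half_t lanes into a single
// __hfma2 on SM60 and newer devices, and falls back to scalar FMAs elsewhere.
//
//   using InnerProduct = ElementwiseInnerProduct<
//       gemm::GemmShape<2, 2, 1>, 1, half_t, half_t, half_t, arch::OpMultiplyAdd>;
//
//   Array<half_t, 2> a, b, c, d;
//   a.fill(half_t(1.5f)); b.fill(half_t(2.0f)); c.fill(half_t(0.25f));
//   InnerProduct{}(d, a, b, c);   // d[i] = a[i] * b[i] + c[i] for i in {0, 1}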
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape,
/// Data type of A elements
typename ElementA,
/// Data type of B elements
typename ElementB,
/// Element type of C matrix
typename ElementC,
/// Concept: arch::OpMultiplyAdd or arch::Mma<>
typename Operator = arch::OpMultiplyAdd,
/// Used for partial specialization
typename Enable = bool
>
struct DepthwiseDirectConvElementwiseInnerProduct;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template that handles all packed matrix layouts
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Data type of B elements
typename ElementB_,
/// Element type of C matrix
typename ElementC_,
/// Operator used to compute GEMM
typename Operator_
>
struct DepthwiseDirectConvElementwiseInnerProductGeneric {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = ElementA_;
/// Data type of operand B
using ElementB = ElementB_;
/// Element type of operand C
using ElementC = ElementC_;
/// Underlying mathematical operator
using Operator = Operator_;
/// A operand storage
using FragmentA = Array<ElementA, Shape::kMN>;
/// B operand storage
using FragmentB = Array<ElementB, Shape::kN>;
/// C operand storage
using FragmentC = Array<ElementC, Shape::kMN>;
/// Instruction
using MmaOp = cutlass::conv::thread::ElementwiseInnerProduct<
gemm::GemmShape<Shape::kN, Shape::kN, 1>,
1,
ElementA,
ElementB,
ElementC,
Operator>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
Array<ElementC, Shape::kN> *ptr_D = reinterpret_cast<Array<ElementC, Shape::kN> *>(&D);
Array<ElementA, Shape::kN> const *ptr_A =
reinterpret_cast<Array<ElementA, Shape::kN> const *>(&A);
Array<ElementB, Shape::kN> const *ptr_B =
reinterpret_cast<Array<ElementB, Shape::kN> const *>(&B);
MmaOp mma_op;
// Copy accumulators
D = C;
// Compute matrix product
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Shape::kN / MmaOp::Shape::kN; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Shape::kM; ++m) {
Array<ElementC, MmaOp::Shape::kN> tmpD = ptr_D[m * Shape::kN / MmaOp::Shape::kN + n];
Array<ElementA, MmaOp::Shape::kN> tmpA = ptr_A[m * Shape::kN / MmaOp::Shape::kN + n];
Array<ElementB, MmaOp::Shape::kN> tmpB = ptr_B[n];
mma_op(tmpD, tmpA, tmpB, tmpD);
ptr_D[m * Shape::kN / MmaOp::Shape::kN + n] = tmpD;
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Data type of B elements
typename ElementB_,
/// Element type of C matrix
typename ElementC_
>
struct DepthwiseDirectConvElementwiseInnerProduct<
Shape_,
ElementA_,
ElementB_,
ElementC_,
arch::OpMultiplyAdd
> {
/// Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
/// Data type of operand A
using ElementA = ElementA_;
/// Data type of operand B
using ElementB = ElementB_;
/// Element type of operand C
using ElementC = ElementC_;
/// Underlying mathematical operator
using Operator = arch::OpMultiplyAdd;
/// A operand storage
using FragmentA =
Array<ElementA, Shape::kMN>; // output_tile_size per thread * groups_per_thread
/// B operand storage
using FragmentB = Array<ElementB, Shape::kN>; // 1 * groups_per_thread
/// C operand storage
using FragmentC =
Array<ElementC, Shape::kMN>; // output_tile_size per thread * groups_per_thread
  static bool const use_optimized = false;
using ArchMmaOperator = DepthwiseDirectConvElementwiseInnerProductGeneric<Shape,
ElementA,
ElementB,
ElementC,
Operator>;
//
// Methods
//
/// Computes a matrix product D = A * B + C
CUTLASS_HOST_DEVICE
void operator()(
FragmentC & D,
FragmentA const & A,
FragmentB const & B,
FragmentC const & C) {
ArchMmaOperator mma;
mma(D, A, B, C);
}
};
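// Illustrative sketch (shape chosen only for the example): FragmentA and FragmentC hold
// Shape::kM output positions times Shape::kN channel groups per thread, while FragmentB
// holds one filter value per channel group; the functor applies an elementwise FMA
// across all of them.
//
//   using DepthwiseMma = DepthwiseDirectConvElementwiseInnerProduct<
//       gemm::GemmShape<4, 8, 1>, half_t, half_t, half_t>;
//
//   DepthwiseMma::FragmentA activations;   // Array<half_t, 32>
//   DepthwiseMma::FragmentB filters;       // Array<half_t, 8>
//   DepthwiseMma::FragmentC accum;         // Array<half_t, 32>
//   DepthwiseMma{}(accum, activations, filters, accum);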
} // namespace thread
} // namespace conv
} // namespace cutlass
| include/cutlass/conv/thread/depthwise_mma.h/0 | {
"file_path": "include/cutlass/conv/thread/depthwise_mma.h",
"repo_id": "include",
"token_count": 3403
} | 20 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Extracts the host-params objects into non-template code.
*/
#pragma once
#define TRACE_CONV_PARAMS_INITIALIZERS_ENABLED 0
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#if TRACE_CONV_PARAMS_INITIALIZERS_ENABLED
#include <fstream>
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized
template<typename Layout_ = layout::TensorNHWC >
struct Depthwise2dFpropDirectConvParams;
/// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation
template<typename Layout_ = layout::TensorNHWC >
struct Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams;
/// Parameters structure used for DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized
template<typename Layout_ = layout::TensorNHWC >
struct Depthwise2dFpropDirectConvFilterIteratorParams;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorOptimized
template<>
struct Depthwise2dFpropDirectConvParams<layout::TensorNHWC> {
using Layout = layout::TensorNHWC;
Layout layout;
int32_t activation_tile_h;
int32_t activation_tile_w;
int32_t activation_tile_hw;
FastDivmod activation_tile_w_divmod;
int filter[2];
int stride[2];
int dilation[2];
int inc_next[2];
FastDivmod pq_divmod;
FastDivmod q_divmod;
int activation_load_count;
int activation_storage_elements;
int activation_size;
//
// Methods
//
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvParams() { }
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< layout object
MatrixCoord threadblock_shape, ///< CTA threadblock Shape
Layout::TensorCoord threadblock_output_shape, ///< Output tile Shape per threadblock
const int element_size_bits, ///< bits of activation element
const int thread_count, ///< threads per threadblock
const int thread_count_contiguous, ///< number of threads for continuous dimension
const int element_per_load) ///< element per each load
: layout(layout) {
filter[0] = problem_size.S;
filter[1] = problem_size.R;
stride[0] = problem_size.stride_w;
stride[1] = problem_size.stride_h;
dilation[0] = problem_size.dilation_w;
dilation[1] = problem_size.dilation_h;
// Compute activation_tile size per threadblock because stride and dilation are runtime params.
activation_tile_h = (threadblock_output_shape.h() - 1) * problem_size.stride_h +
(problem_size.R - 1) * problem_size.dilation_h + 1;
activation_tile_w = (threadblock_output_shape.w() - 1) * problem_size.stride_w +
(problem_size.S - 1) * problem_size.dilation_w + 1;
activation_tile_hw = activation_tile_h * activation_tile_w;
activation_tile_w_divmod = FastDivmod(activation_tile_w);
    /// The two values below cannot be templatized because stride and dilation are runtime params
activation_load_count = (thread_count_contiguous * activation_tile_hw + (thread_count - 1)) / thread_count;
activation_storage_elements = activation_load_count * element_per_load * thread_count;
activation_size = activation_storage_elements * element_size_bits / 8;
// Fastdivmod for output P, Q
int tiles_p =
(problem_size.P + (threadblock_output_shape.h() - 1)) / (threadblock_output_shape.h());
int tiles_q = (problem_size.Q + (threadblock_output_shape.w() - 1)) /
(threadblock_output_shape.w());
pq_divmod = FastDivmod(tiles_p * tiles_q);
q_divmod = FastDivmod(tiles_q);
// next S
inc_next[0] = problem_size.dilation_w;
// next R
inc_next[1] = (activation_tile_w * problem_size.dilation_h - (problem_size.S - 1) * problem_size.dilation_w);
}
};
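// Worked example of the activation tile computation above (illustrative numbers): for a
// threadblock output tile of 8 x 8, a 3 x 3 filter, stride 1 and dilation 1,
//
//   activation_tile_h = (8 - 1) * 1 + (3 - 1) * 1 + 1 = 10
//   activation_tile_w = (8 - 1) * 1 + (3 - 1) * 1 + 1 = 10
//
// so each threadblock stages a 10 x 10 input window per channel group, and
// activation_tile_hw = 100 drives the divmod and inc_next bookkeeping computed above.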
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for DepthwiseFpropActivationDirect2dConvTileAccessIteratorFixedStrideDilation
template <>
struct Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams<layout::TensorNHWC> {
using Layout = layout::TensorNHWC;
Layout layout;
FastDivmod pq_divmod;
FastDivmod q_divmod;
int activation_size;
//
// Methods
//
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams() {}
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvActivationIteratorFixedStrideDilationParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< Layout object
MatrixCoord threadblock_shape, ///< Threadblock Shape
Layout::TensorCoord threadblock_output_shape, ///< Output tile Shape per threadblock
const int activation_size_ ///< Activation size loaded by iterator
)
: layout(layout),
activation_size(activation_size_) {
// Fastdivmod for output P, Q
int tiles_p =
(problem_size.P + (threadblock_output_shape.h() - 1)) / (threadblock_output_shape.h());
int tiles_q =
(problem_size.Q + (threadblock_output_shape.w() - 1)) / (threadblock_output_shape.w());
pq_divmod = FastDivmod(tiles_p * tiles_q);
q_divmod = FastDivmod(tiles_q);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure used for DepthwiseFpropFilterDirectConvTileAccessIteratorOptimized
template <>
struct Depthwise2dFpropDirectConvFilterIteratorParams<layout::TensorNHWC> {
using Layout = layout::TensorNHWC;
Layout layout;
int filter_size;
bool is_convolution;
//
// Methods
//
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvFilterIteratorParams() {}
CUTLASS_HOST_DEVICE
Depthwise2dFpropDirectConvFilterIteratorParams(
Conv2dProblemSize const &problem_size,
Layout const &layout, ///< Layout object
MatrixCoord threadblock_shape, ///< Threadblock Shape
const int filter_size_) ///< Filter size loaded by iterator
: layout(layout),
filter_size(filter_size_),
is_convolution(problem_size.mode == Mode::kConvolution){}
};
} // namespace threadblock
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/threadblock/depthwise_direct_conv_params.h/0 | {
"file_path": "include/cutlass/conv/threadblock/depthwise_direct_conv_params.h",
"repo_id": "include",
"token_count": 2901
} | 21 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Defines tile iterators used by warp-level depthwise matrix multiply operators targeting SIMT
    instructions
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma_simt_policy.h"
#include "cutlass/gemm/warp/mma_simt_tile_iterator.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Iterates over operands to warp-level matrix multiply operations targeting SIMT instructions
///
/// concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Operand identity
cutlass::gemm::Operand Operand,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK = 1,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize = 1
>
class DepthwiseMmaSimtTileIterator;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for B operands of row-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize>
class DepthwiseMmaSimtTileIterator<Shape_,
cutlass::gemm::Operand::kB,
Element_,
layout::RowMajor,
Policy_,
PartitionsK,
PartitionGroupSize>
: public cutlass::gemm::warp::MmaSimtTileIterator<Shape_,
cutlass::gemm::Operand::kB,
Element_,
layout::RowMajor,
Policy_,
PartitionsK,
PartitionGroupSize> {
using Base = cutlass::gemm::warp::MmaSimtTileIterator<Shape_,
cutlass::gemm::Operand::kB,
Element_,
layout::RowMajor,
Policy_,
PartitionsK,
PartitionGroupSize>;
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static cutlass::gemm::Operand const kOperand = cutlass::gemm::Operand::kB;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::RowMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = typename Base::TensorRef;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Thread-level shape of a fragment
using ThreadShape = typename Base::ThreadShape;
/// Number of individual loads
using Iterations = typename Base::Iterations;
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
static_assert(Policy::LaneMmaShape::kN == 1, "Each thread should be 1 element per LDS along the k-dim");
private:
MatrixCoord lane_offset_;
int channel_idx_;
int base_channel_idx_;
int warps_n_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
DepthwiseMmaSimtTileIterator():Base() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
DepthwiseMmaSimtTileIterator(
TensorRef ref,
int lane_id
) : Base(ref, lane_id) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
warps_n_ = -1;
channel_idx_ = 0;
base_channel_idx_ = 0;
lane_offset_ = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN);
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
DepthwiseMmaSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
if(warps_n_ == -1){
warps_n_ = coord.column();
}
Base::add_tile_offset(coord);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator. (vector loads)
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
Array<Element, Policy::LaneMmaShape::kN> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < Iterations::kRow; ++k) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
void const *ptr = this->ref_.data() +
this->ref_.offset({-(channel_idx_ - base_channel_idx_),
n * Policy::WarpShape::kColumn}) +
pointer_offset / Policy::LaneMmaShape::kN;
// Base_k of a warp + Base_k of current threads.
int thread_k_base_idx =
warps_n_ * Shape::kColumn / Policy::LaneMmaShape::kN + lane_offset_.column();
if (channel_idx_ + k == thread_k_base_idx + n * Policy::WarpShape::kColumn) {
// Depthwise kernel would only do computation when channel == k.
// Loads an element when the current computation channel == the k corresponding to this thread.
arch::shared_load(dst_ptr[n + k * Iterations::kColumn], ptr);
} else {
// Reduce SMEM load
dst_ptr[n + k * Iterations::kColumn].fill(Element(0));
}
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
if(k_group % PartitionGroupSize == 0 && k_group != 0){
base_channel_idx_ = k_group;
}
channel_idx_ = k_group;
}
};
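// Note on load_with_pointer_offset above: in the depthwise mainloop a thread's B-operand
// fragment only contributes when the k-group currently being processed matches the channel
// that thread owns, so the iterator issues a shared-memory load for exactly that slot and
// zero-fills every other slot instead of loading it.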
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of filter (concept: gemm::GemmShape<Depth, Height, Width>)
typename FilterShape_,
/// Size of the matrix to load (concept: MatrixShape)
typename ThreadOutputShape_,
/// Size of the matrix to load (concept: MatrixShape)
typename ThreadBlockOutputShape_,
/// Operand identity
cutlass::gemm::Operand Operand,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Iterator algo type
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape = cutlass::MatrixShape<-1, -1>,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape = cutlass::MatrixShape<-1, -1>,
/// Activation Shape loaded by threadblock
typename ActivationShape = cutlass::conv::TensorNHWCShape<-1,-1,-1,-1>,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK = 1,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize = 1>
class DepthwiseDirect2dConvSimtTileIterator;
/// Specialization for A operands of row-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of filter (concept: gemm::GemmShape<Depth, Height, Width>)
typename FilterShape_,
/// Size of the matrix to load (concept: TensorNHWC)
typename ThreadOutputShape_,
/// Size of the matrix to load (concept: TensorNHWC)
typename ThreadBlockOutputShape_,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Iterator algo type
conv::IteratorAlgorithm IteratorAlgorithm,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape,
/// Activation Shape loaded by threadblock
typename ActivationShape,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize>
class DepthwiseDirect2dConvSimtTileIterator<Shape_,
FilterShape_,
ThreadOutputShape_,
ThreadBlockOutputShape_,
cutlass::gemm::Operand::kA,
Element_,
Policy_,
IteratorAlgorithm,
StrideShape,
DilationShape,
ActivationShape,
PartitionsK,
PartitionGroupSize> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of filter (concept: gemm::GemmShape<Depth, Height, Width>)
using FilterShape = FilterShape_;
/// Shape of tile to load (concept: TensorNHWC)
using ThreadOutputShape = ThreadOutputShape_;
/// Shape of tile to load (concept: TensorNHWC)
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
/// Operand tag
static cutlass::gemm::Operand const kOperand = cutlass::gemm::Operand::kA;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::RowMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
//
// Derived quantities
//
static_assert(!(Shape::kRow % Policy::WarpShape::kRow),
"The warp-level GEMM M size must be divisible by the number of threads arranged along the M dimension.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero.");
static_assert(Shape::kRow / Policy::WarpShape::kRow > 0, "Shape::kRow / Policy::WarpShape::kRow must be greater than zero.");
// Thread-level shape of a fragment
using ThreadShape = MatrixShape<
    ThreadOutputShape::kNHW,  // Output tile shape computed by the current thread
ThreadOutputShape::kC
>;
static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
/// Number of individual loads
using Iterations = MatrixShape<
ThreadShape::kRow,
ThreadShape::kColumn / Policy::LaneMmaShape::kN
>;
using ThreadTileCount = MatrixShape<
ThreadBlockOutputShape::kH / ThreadOutputShape::kH,
ThreadBlockOutputShape::kW / ThreadOutputShape::kW
>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
protected:
/// Internal reference
cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kN>, layout::RowMajor> ref_;
int activation_offset[ThreadOutputShape::kH][ThreadOutputShape::kW][Iterations::kColumn];
int iterator_r_;
int iterator_s_;
int iterator_offset_;
int inc_next_s_ ;
int inc_next_r_ ;
MatrixCoord lane_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator(
TensorRef ref,
int lane_id
) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
// Set channel offset
lane_offset_ = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN);
ref.add_coord_offset(lane_offset_);
ref_.reset(reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(ref.data()),
ref.stride(0) / Policy::LaneMmaShape::kN);
iterator_r_ = 0;
iterator_s_ = 0;
iterator_offset_ = 0;
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
template<typename Params>
CUTLASS_HOST_DEVICE
void setup_initial_status(Params const& params) {
inc_next_s_ = params.inc_next[0];
inc_next_r_ = params.inc_next[1];
// Get base HW offset of current threads
int threadgroup = threadIdx.x / (ThreadBlockOutputShape::kC / ThreadOutputShape::kC);
int base_p_ =
(threadgroup / (ThreadTileCount::kColumn)) * ThreadOutputShape::kH;
int base_q_ =
(threadgroup % (ThreadTileCount::kColumn)) * ThreadOutputShape::kW;
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < ThreadOutputShape::kH; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int q = 0; q < ThreadOutputShape::kW; ++q) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < Iterations::kColumn; ++col) {
int base_w = (base_q_ + q) * params.stride[0];
int base_h = (base_p_ + p) * params.stride[1];
int offset = base_h * params.activation_tile_w + base_w;
activation_offset[p][q][col] = offset;
}
}
}
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
// Set warp row and col start
lane_offset_ = MatrixCoord({lane_offset_.row() + coord.row() * Shape::kRow, lane_offset_.column()});
return *this;
}
  /// Advances the underlying pointer by a byte offset and resets the filter (r, s) position
CUTLASS_HOST_DEVICE
void advance(int32_t pointer_offset) {
ref_.reset(ref_.data() + pointer_offset / sizeof(Element) / Policy::LaneMmaShape::kN);
iterator_s_ = 0;
iterator_r_ = 0;
iterator_offset_ = 0;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &operator++() {
++iterator_s_;
if (iterator_s_ < FilterShape::kColumn) {
iterator_offset_ += inc_next_s_;
return *this;
}
iterator_s_ = 0;
++iterator_r_;
if (iterator_r_ < FilterShape::kRow) {
iterator_offset_ += inc_next_r_;
return *this;
}
iterator_r_ = 0;
iterator_offset_ = 0;
return *this;
}
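  // Traversal sketch: operator++ above sweeps the filter window with s as the fast index and
  // r as the slow index. inc_next_s_ and inc_next_r_ come from
  // Depthwise2dFpropDirectConvParams (dilation_w and
  // activation_tile_w * dilation_h - (S - 1) * dilation_w, respectively), so
  // activation_offset[p][q][n] + iterator_offset_ always addresses the activation element
  // under the current (r, s) filter tap.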
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator & operator--() {
// Do nothing
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator. (vector loads)
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
Array<Element, Policy::LaneMmaShape::kN> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < ThreadOutputShape::kH; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int q = 0; q < ThreadOutputShape::kW; ++q) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
void const *ptr = ref_.data() +
ref_.offset({activation_offset[p][q][n] + (iterator_offset_),
n * Policy::WarpShape::kColumn}) +
pointer_offset / Policy::LaneMmaShape::kN;
arch::shared_load(dst_ptr[n + q + p * ThreadOutputShape::kW], ptr);
}
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
// Do nothing at present.
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, Index pointer_offset) const {
store_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization for A operands of row-major layouts
///
/// Concept: MutableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of filter (concept: gemm::GemmShape<Depth, Height, Width>)
typename FilterShape_,
/// Size of the matrix to load (concept: TensorNHWC)
typename ThreadOutputShape_,
/// Size of the matrix to load (concept: TensorNHWC)
typename ThreadBlockOutputShape_,
/// Data type of A elements
typename Element_,
/// Shape of the warp in units of thread (concept: MmaSimtPolicy)
typename Policy_,
/// Stride ( MatrixShape<Height, Width> )
typename StrideShape_,
/// Dilation ( MatrixShape<Height, Width> )
typename DilationShape_,
/// Activation Shape loaded by threadblock
typename ActivationShape_,
/// Number of partitions along K dimension - used in sliced-K
int PartitionsK,
/// Group Size along kPartition - used in sliced-K
int PartitionGroupSize>
class DepthwiseDirect2dConvSimtTileIterator<Shape_,
FilterShape_,
ThreadOutputShape_,
ThreadBlockOutputShape_,
cutlass::gemm::Operand::kA,
Element_,
Policy_,
IteratorAlgorithm::kFixedStrideDilation,
StrideShape_,
DilationShape_,
ActivationShape_,
PartitionsK,
PartitionGroupSize> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of filter (concept: gemm::GemmShape<Depth, Height, Width>)
using FilterShape = FilterShape_;
/// Shape of tile to load (concept: TensorNHWC)
using ThreadOutputShape = ThreadOutputShape_;
/// Shape of tile to load (concept: TensorNHWC)
using ThreadBlockOutputShape = ThreadBlockOutputShape_;
/// Stride ( MatrixShape<Height, Width> )
using StrideShape = StrideShape_;
/// Dilation ( MatrixShape<Height, Width> )
using DilationShape = DilationShape_;
/// Activation Shape loaded by threadblock
using ActivationShape = ActivationShape_;
/// Operand tag
static cutlass::gemm::Operand const kOperand = cutlass::gemm::Operand::kA;
/// Element type
using Element = Element_;
/// Layout of policy
using Layout = layout::RowMajor;
/// Decomposition of elements among threads
using Policy = Policy_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
//
// Derived quantities
//
static_assert(!(Shape::kRow % Policy::WarpShape::kRow),
"The warp-level GEMM M size must be divisible by the number of threads arranged "
"along the M dimension.");
static_assert(Shape::kRow > 0, "Shape::kRow must be greater than zero.");
static_assert(Shape::kColumn > 0, "Shape::kColumn must be greater than zero.");
static_assert(Policy::WarpShape::kRow > 0, "Policy::WarpShape::kRow must be greater than zero.");
static_assert(Shape::kRow / Policy::WarpShape::kRow > 0,
"Shape::kRow / Policy::WarpShape::kRow must be greater than zero.");
// Activations loaded by threadblock
static int const ThreadActivationShapeH = (ThreadOutputShape::kH - 1) * StrideShape::kRow +
(FilterShape::kRow - 1) * DilationShape::kRow + 1;
static int const ThreadActivationShapeW = (ThreadOutputShape::kW - 1) * StrideShape::kColumn +
(FilterShape::kColumn - 1) * DilationShape::kColumn + 1;
using ThreadActivationShape = cutlass::conv::
TensorNHWCShape<1, ThreadActivationShapeH, ThreadActivationShapeW, ThreadOutputShape::kC>;
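  // Worked example (illustrative numbers): with a 2 x 2 ThreadOutputShape, a 3 x 3 filter,
  // stride 1 and dilation 1, ThreadActivationShapeH = (2 - 1) * 1 + (3 - 1) * 1 + 1 = 4 and
  // ThreadActivationShapeW = 4, so each thread keeps a 4 x 4 activation footprint in
  // registers for the whole filter sweep.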
// Thread-level shape of a fragment
using ThreadShape =
MatrixShape<ThreadOutputShape::kNHW,
ThreadOutputShape::kC>;
static_assert(!(ThreadShape::kColumn % Policy::LaneMmaShape::kN),
"Thread-level GEMM must be divisible by Policy::LaneMmaShape.");
/// Number of individual loads
using Iterations =
MatrixShape<ThreadShape::kRow, ThreadShape::kColumn / Policy::LaneMmaShape::kN>;
using ThreadTileCount = MatrixShape<ThreadBlockOutputShape::kH / ThreadOutputShape::kH,
ThreadBlockOutputShape::kW / ThreadOutputShape::kW>;
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, ThreadShape::kCount>;
protected:
/// Internal reference
cutlass::TensorRef<Array<Element, Policy::LaneMmaShape::kN>, layout::RowMajor> ref_;
Array<Element, Policy::LaneMmaShape::kN>
activation[ThreadActivationShape::kH][ThreadActivationShape::kW][Iterations::kColumn];
int iterator_r_;
int iterator_s_;
MatrixCoord lane_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator() {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator(TensorRef ref, int lane_id) {
// compute offset based on thread ID and lane layout
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
// Set channel offset
lane_offset_ = lane_layout.inverse(lane_id) * MatrixCoord(0, Policy::LaneMmaShape::kN);
ref.add_coord_offset(lane_offset_);
ref_.reset(reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(ref.data()),
ref.stride(0) / Policy::LaneMmaShape::kN);
iterator_r_ = 0;
iterator_s_ = 0;
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
  /// Sets up the iterator's initial state and preloads this thread's activation footprint from shared memory.
template <typename Params>
CUTLASS_HOST_DEVICE void setup_initial_status(
Params const ¶ms) {
// Get base HW offset of current threads
int threadgroup = threadIdx.x / (ThreadBlockOutputShape::kC / ThreadOutputShape::kC);
int base_h =
(threadgroup / (ThreadTileCount::kColumn)) * ThreadOutputShape::kH * StrideShape::kRow;
int base_w =
(threadgroup % (ThreadTileCount::kColumn)) * ThreadOutputShape::kW * StrideShape::kColumn;
CUTLASS_PRAGMA_UNROLL
for (int h = 0; h < ThreadActivationShape::kH; ++h) {
CUTLASS_PRAGMA_UNROLL
for (int w = 0; w < ThreadActivationShape::kW; ++w) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < Iterations::kColumn; ++col) {
int offset = (base_h + h) * ActivationShape::kW + (base_w + w);
void const *ptr = ref_.data() + ref_.offset({offset, col * Policy::WarpShape::kColumn});
arch::shared_load(activation[h][w][col], ptr);
}
}
}
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &add_tile_offset(TensorCoord const &coord) {
// Set warp row and col start
lane_offset_ =
MatrixCoord({lane_offset_.row() + coord.row() * Shape::kRow, lane_offset_.column()});
return *this;
}
  /// Advances the underlying pointer by a byte offset and resets the filter (r, s) position
CUTLASS_HOST_DEVICE
void advance(int32_t pointer_offset) {
ref_.reset(ref_.data() + pointer_offset / sizeof(Element) / Policy::LaneMmaShape::kN);
iterator_s_ = 0;
iterator_r_ = 0;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &operator++() {
++iterator_s_;
if (iterator_s_ < FilterShape::kColumn) {
return *this;
}
iterator_s_ = 0;
++iterator_r_;
if (iterator_r_ < FilterShape::kRow) {
return *this;
}
iterator_r_ = 0;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
DepthwiseDirect2dConvSimtTileIterator &operator--() {
// Do nothing
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator. (vector loads)
CUTLASS_HOST_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) const {
Array<Element, Policy::LaneMmaShape::kN> *dst_ptr =
reinterpret_cast<Array<Element, Policy::LaneMmaShape::kN> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < ThreadOutputShape::kH; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int q = 0; q < ThreadOutputShape::kW; ++q) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Iterations::kColumn; ++n) {
const int h = p * StrideShape::kRow + iterator_r_ * DilationShape::kRow;
const int w = q * StrideShape::kColumn + iterator_s_ * DilationShape::kColumn;
dst_ptr[n + q + p * ThreadOutputShape::kW] = activation[h][w][n];
}
}
}
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { load_with_pointer_offset(frag, 0); }
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) const {
// Do nothing at present.
}
/// Stores a fragment to memory at the location pointed to by the iterator
CUTLASS_HOST_DEVICE
void store(Fragment const &frag, Index pointer_offset) const {
store_with_pointer_offset(frag, 0);
}
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
// no operation here
}
};
} // namespace warp
} // namespace conv
} // namespace cutlass
| include/cutlass/conv/warp/mma_depthwise_simt_tile_iterator.h/0 | {
"file_path": "include/cutlass/conv/warp/mma_depthwise_simt_tile_iterator.h",
"repo_id": "include",
"token_count": 11926
} | 22 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This extends the contents of cutlass/functional.h with frequently used activation functions.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/constants.h"
#include "cutlass/complex.h"
#include "cutlass/array.h"
#include "cutlass/half.h"
#include "cutlass/functional.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
// Identity operator
template <typename T>
struct Identity {
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
T operator()(T value) const {
return value;
}
};
template <typename T, int N>
struct Identity<Array<T, N> > {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> value) const {
return value;
}
};
/// Scale operator
template <typename T>
struct Scale {
struct Arguments {
using scale_type = T;
T scale = T(1);
};
CUTLASS_HOST_DEVICE
T operator()(T value, T scale) const {
multiplies<T> mul;
return mul(scale, value);
}
CUTLASS_HOST_DEVICE
T operator()(T value, Arguments args = Arguments()) const {
return this->operator()(value, args.scale);
}
};
template <typename T, int N>
struct Scale<Array<T, N>> {
using Arguments = typename Scale<T>::Arguments;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> values, T scale) const {
multiplies<Array<T, N>> mul;
return mul(scale, values);
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> values, Arguments args = Arguments()) const {
return this->operator()(values, args.scale);
}
};
/// Specialization to compose other activations with a defined unary operator
/// e.g. Scale<Identity<T>>
template <template <class> class Activation, typename T>
struct Scale<Activation<T>> {
using Arguments = typename Scale<T>::Arguments;
CUTLASS_HOST_DEVICE
T operator()(T value, typename Arguments::scale_type scale) const {
multiplies<T> mul;
Activation<T> act;
return mul(scale, act(value));
}
CUTLASS_HOST_DEVICE
T operator()(T value, Arguments args = Arguments()) const {
return this->operator()(value, args.scale);
}
};
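// Usage sketch of the composition above (illustrative; the element type and scale value are
// arbitrary example choices):
//
//   using ScaledReLu = cutlass::epilogue::thread::Scale<
//       cutlass::epilogue::thread::ReLu<float>>;
//
//   ScaledReLu op;
//   ScaledReLu::Arguments args;
//   args.scale = 2.0f;
//   float y = op(-3.0f, args);   // ReLu(-3) == 0, then scaled: y == 0.0f
//   float z = op( 3.0f, args);   // ReLu(3) == 3, scaled by 2:  z == 6.0f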
/// ReLu operator - propagates NaNs
/// Always put threshold in the right hand side of max to propagate NaN.
template <typename T>
struct ReLu {
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
T operator()(T threshold, T value) const {
maximum<T> mx;
return mx(value, threshold);
}
CUTLASS_HOST_DEVICE
T operator()(T value) const {
maximum<T> mx;
return mx(value, T(0));
}
};
template <typename T>
using ReLU = ReLu<T>;
template <typename T, int N>
struct ReLu<Array<T, N>> {
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(T const & threshold, Array<T, N> const &frag) const {
maximum<Array<T, N>> mx;
return mx(frag, threshold);
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &frag) const {
maximum<Array<T, N>> mx;
return mx(frag, T(0));
}
};
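// Usage sketch (illustrative; the fragment width of 4 is an arbitrary example choice):
//
//   cutlass::epilogue::thread::ReLu<cutlass::Array<float, 4>> relu_op;
//   cutlass::Array<float, 4> frag;
//   frag[0] = -1.0f; frag[1] = 2.0f; frag[2] = -0.5f; frag[3] = 7.0f;
//   frag = relu_op(frag);   // frag == { 0, 2, 0, 7 }; NaN inputs propagate unchanged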
// Generic clamp
template <typename T>
struct Clamp {
struct Arguments {
T lower_bound = CUTLASS_STL_NAMESPACE::numeric_limits<T>::lowest();
T upper_bound = CUTLASS_STL_NAMESPACE::numeric_limits<T>::max();
};
CUTLASS_HOST_DEVICE
T operator()(T const& value, T const& lower_bound, T const& upper_bound) const {
maximum<T> mx;
minimum<T> mn;
return mn(mx(value, lower_bound), upper_bound);
}
CUTLASS_HOST_DEVICE
T operator()(T const& value, Arguments const& args = Arguments()) const {
return this->operator()(value, args.lower_bound, args.upper_bound);
}
};
template <typename T, int N>
struct Clamp<Array<T,N>> {
using Arguments = typename Clamp<T>::Arguments;
CUTLASS_HOST_DEVICE
Array<T,N> operator()(Array<T,N> const& values, T const& lower_bound, T const& upper_bound) const {
maximum<Array<T,N>> mx;
minimum<Array<T,N>> mn;
return mn(mx(values, lower_bound), upper_bound);
}
CUTLASS_HOST_DEVICE
Array<T,N> operator()(Array<T,N> const& values, Arguments const& args = Arguments()) const {
return this->operator()(values, args.lower_bound, args.upper_bound);
}
};
// Leaky Relu operator
template <typename T>
struct LeakyReLU {
static const bool kIsHeavy = false;
struct Arguments {
T leaky_alpha = T(0);
};
CUTLASS_HOST_DEVICE
T operator()(T const& value, T const& leaky_alpha) const {
T res = value > T(0) ? value : value * leaky_alpha;
return res;
}
CUTLASS_HOST_DEVICE
T operator()(T const& value, Arguments const& args = Arguments()) const {
this->operator()(value, args.leaky_alpha);
}
};
template <typename T, int N>
struct LeakyReLU<Array<T, N> > {
static const bool kIsHeavy = false;
using Arguments = typename LeakyReLU<T>::Arguments;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& values, T const& leaky_alpha) const {
Array<T, N> y;
LeakyReLU<T> leaky_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < int(values.size()); ++i) {
y[i] = leaky_op(values[i], leaky_alpha);
}
return y;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& values, Arguments const& args = Arguments()) const {
return this->operator()(values, args.leaky_alpha);
}
};
// Tanh operator
template <typename T>
struct Tanh {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
T operator()(T const &value) const {
return fast_tanh(value);
}
};
template <typename T, int N>
struct Tanh<Array<T, N> > {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
Tanh<T> tanh_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = tanh_op(value[i]);
}
return y;
}
};
template <int N>
struct Tanh<Array<half_t, N>> {
using T = half_t;
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& z) const {
fast_tanh_op<Array<T, N>> tanh;
return tanh(z);
}
};
// Sigmoid operator
template <typename T>
struct Sigmoid {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
T operator()(T const &value) const {
return T(1) / (T(1) + fast_exp(-value));
}
};
template <typename T, int N>
struct Sigmoid<Array<T, N> > {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
Sigmoid<T> sigmoid_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = sigmoid_op(value[i]);
}
return y;
}
};
template <int N>
struct Sigmoid<Array<half_t, N>> {
using T = half_t;
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& z) const {
plus<Array<T, N>> add;
#if defined(CUTLASS_USE_TANH_FOR_SIGMOID)
multiplies<Array<T, N>> mul;
fast_tanh_op<Array<T, N>> tanh;
return mul(add(tanh(mul(z, cutlass::constants::half<T>())), cutlass::constants::one<T>()),
cutlass::constants::half<T>());
#else
divides<Array<T, N>> div;
negate<Array<T, N>> neg;
fast_exp_op<Array<T, N>> fast_exp;
return div(cutlass::constants::one<T>(),
add(cutlass::constants::one<T>(),
fast_exp(neg(z))));
#endif
}
};
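// The CUTLASS_USE_TANH_FOR_SIGMOID path above relies on the identity
//
//   sigmoid(x) = 1 / (1 + exp(-x)) = 0.5 * (tanh(x / 2) + 1)
//
// which replaces the exp-and-divide sequence with a single fast tanh evaluation.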
// SiLu (swish) operator introduced by Elfwing et al. in the following paper
// "Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning" (2017)
// https://arxiv.org/pdf/1702.03118.pdf
// It is used in EfficientNet and YOLOv5, for example.
// Reference: https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html
template <typename T>
struct SiLu {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
T operator()(T const &value) const {
Sigmoid<T> sigmoid;
return value * sigmoid(value);
}
};
template <typename T, int N>
struct SiLu<Array<T, N>> {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Sigmoid<Array<T, N>> sigmoid_op;
multiplies<Array<T, N>> mul;
return mul(value, sigmoid_op(value));
}
};
template <typename T>
using ScaledSiLu = Scale<SiLu<T>>;
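// For reference, SiLu(x) = x * sigmoid(x); e.g. SiLu(0) = 0 and SiLu(1) = 1 / (1 + e^-1) ~= 0.731.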
// Hardswish operator introduced by Howard et al. in the following paper
// "Searching for MobileNetV3" (2019)
// https://arxiv.org/pdf/1905.02244.pdf
// It is used in models based on MobilenetNetV3.
// Reference: https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html
template <typename T>
struct HardSwish {
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
T operator()(T const &x) const {
minimum<T> mn;
maximum<T> mx;
T relu6 = mn(mx(x + T(3), T(0)), T(6));
return x * relu6 / T(6);
}
};
template <>
struct HardSwish<float> {
using T = float;
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
T operator()(T const &x) const {
minimum<T> mn;
maximum<T> mx;
T relu6 = mn(mx(x + T(3), T(0)), T(6));
return x * relu6 * 0.16666667f;
}
};
template <typename T, int N>
struct HardSwish<Array<T, N> > {
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
HardSwish<T> hardswish_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = hardswish_op(value[i]);
}
return y;
}
};
template <int N>
struct HardSwish<Array<half_t, N> > {
using T = half_t;
static const bool kIsHeavy = false;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
minimum<Array<T, N> > mn;
maximum<Array<T, N> > mx;
multiplies<Array<T, N> > mul;
plus<Array<T, N> > add;
return mul(mul(mn(mx(add(value, T(3)), T(0)), T(6)), value), T(0.16666667f));
}
};
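// For reference, HardSwish(x) = x * min(max(x + 3, 0), 6) / 6; the 0.16666667f factor in the
// specializations above is 1/6 folded into a multiply. For example HardSwish(-4) = 0,
// HardSwish(0) = 0 and HardSwish(3) = 3 * 6 / 6 = 3.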
//
// GELU function definitions implemented as described by
// Hendrycks, D., and Gimpel, K. in
// "Gaussian Error Linear Units (GELUs)." (2020)
// https://arxiv.org/pdf/1606.08415.pdf
//
// Floating-point constants are Taylor coefficients described in the paper.
//
// GELU operator
template <typename T>
struct GELU {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
T operator()(T const &value) const {
return T(cutlass::constants::half<T>() * value *
(cutlass::constants::one<T>() + (T)erff((float)(value * cutlass::constants::half_root_two<T>()))));
}
};
template <>
struct GELU<float> {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
float operator()(float const &value) const {
return cutlass::constants::half<float>() * value *
(cutlass::constants::one<float>() + erff(value * cutlass::constants::half_root_two<float>() ));
}
};
template <>
struct GELU<double> {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
double operator()(double const &value) const {
return cutlass::constants::half<double>() * value *
(cutlass::constants::one<double>() + erf( value * cutlass::constants::half_root_two<double>() ));
}
};
template <typename T, int N>
struct GELU<Array<T, N> > {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
GELU<T> gelu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = gelu_op(value[i]);
}
return y;
}
};
template <typename T>
using ScaledGELU = Scale<GELU<T>>;
// GELU operator implemented using the Taylor series approximation
template <typename T>
struct GELU_taylor {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
T operator()(T const &z) const {
T k0 = T(0.7978845608028654);
T k1 = T(0.044715);
return T(cutlass::constants::half<T>() * z *
(cutlass::constants::one<T>() + fast_tanh(k0 * z * (cutlass::constants::one<T>() + k1 * z * z))));
}
};
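// The constants above implement the tanh approximation
//
//   GELU(x) ~= 0.5 * x * (1 + tanh( sqrt(2 / pi) * (x + 0.044715 * x^3) ))
//
// with k0 = sqrt(2 / pi) ~= 0.7978845608 and k1 = 0.044715; the array specializations below
// evaluate the same expression with vectorized multiplies and fused multiply-adds.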
template <int N>
struct GELU_taylor<Array<half_t, N> > {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const &z) const {
using T = half_t;
Array<half_t, N> y;
half_t k0 = half_t(0.7978845608028654);
half_t k1 = half_t(0.044715);
multiply_add<Array<half_t, N>> fma;
multiplies<Array<half_t, N>> mul;
plus<Array<half_t, N>> add;
fast_tanh_op<Array<half_t, N>> tanh;
Array<half_t, N> u = mul(mul(k0, z), fma(mul(k1, z), z, cutlass::constants::one<T>()));
y = mul(mul(z, cutlass::constants::half<T>()), add(cutlass::constants::one<T>(), tanh(u)));
return y;
}
};
template <typename T, int N>
struct GELU_taylor<Array<T, N> > {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &value) const {
Array<T, N> y;
GELU_taylor<T> gelu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = gelu_op(value[i]);
}
return y;
}
};
template <typename T>
using ScaledGELU_taylor = Scale<GELU_taylor<T>>;
/// Computes backwards pass for GELU operator assuming d_t is the layer gradient and
/// z is computed from the forward pass.
template <typename T>
struct dGELU {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
T operator()(T const &d_t, T const &z) const {
T k0 = T(0.7978845608028654);
T k1 = T(0.044715);
T k2 = T(0.1070322243);
T tanh_out = fast_tanh(k0 * z * (1 + k1 * z * z));
T ff = constants::half<T>() * z * ((1 - tanh_out * tanh_out) * (k0 + k2 * z * z)) +
constants::half<T>() * (1 + tanh_out);
return ff * d_t;
}
};
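// The expression above differentiates the tanh-approximated GELU: with u = k0 * z * (1 + k1 * z^2),
//
//   d/dz [ 0.5 * z * (1 + tanh(u)) ] = 0.5 * (1 + tanh(u)) + 0.5 * z * (1 - tanh(u)^2) * du/dz,
//
// and du/dz = k0 + 3 * k0 * k1 * z^2, which is why k2 = 0.1070322243 ~= 3 * k0 * k1.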
template <typename T, int N>
struct dGELU<Array<T, N> > {
static const bool kIsHeavy = true;
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &d_t, Array<T, N> const &z) const {
Array<T, N> y;
dGELU<T> gelu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = gelu_op(d_t[i], z[i]);
}
return y;
}
};
template <typename T>
struct dReLU {
CUTLASS_HOST_DEVICE
T operator()(T d_t, bool d_relu) const {
return d_relu ? d_t : T(0);
}
template <typename U>
CUTLASS_HOST_DEVICE
T operator()(T d_t, U d_relu) const {
return operator()(d_t, static_cast<bool>(d_relu));
}
};
template <typename T, int N>
struct dReLU<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& d_t, bool const (&d_relu)[N]) const {
Array<T, N> y;
dReLU<T> relu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = relu_op(d_t[i], d_relu[i]);
}
return y;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& d_t, Array<uint1b_t, N> const& d_relu) const {
UnpackPredicates<N> unpack_op;
bool preds[N];
unpack_op(preds, d_relu);
return operator()(d_t, preds);
}
template <typename U>
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& d_t, Array<U, N> const& d_relu) const {
Array<T, N> y;
dReLU<T> relu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = relu_op(d_t[i], d_relu[i]);
}
return y;
}
};
/// Computes backwards pass for ReLU operator assuming d_t is the layer gradient and
/// z is computed from the forward pass.
template <typename T>
struct dReLU_Z {
CUTLASS_HOST_DEVICE
T operator()(T d_t, T z) const {
return z < 0 ? T(0) : d_t;
}
};
template <typename T, int N>
struct dReLU_Z<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const& d_t, Array<T, N> const& z) const {
Array<T, N> y;
dReLU_Z<T> relu_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = relu_op(d_t[i], z[i]);
}
return y;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/activation.h/0 | {
"file_path": "include/cutlass/epilogue/thread/activation.h",
"repo_id": "include",
"token_count": 7254
} | 23 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination with a maximum operation used by epilogues.
*/
#pragma once
#include "cutlass/half.h"
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/epilogue/thread/activation.h"
#include "cutlass/epilogue/thread/scale_type.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Single source of truth for whether `LinearCombinationRelu` is treated as a heavy epilogue operation
constexpr bool LinearCombinationReluIsHeavy() {
return false;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements.
///
/// D = alpha * accumulator + beta * source + uniform
///
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
///< Usually it is 128/sizeof_bits<ElementOutput_>,
                                       ///< but 64 or 32 is sometimes used when there is not enough data to store
typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type
typename ElementCompute_ = ElementOutput_, ///< Data type used to compute linear combination
ScaleType::Kind Scale = ScaleType::Default, ///< Control Alpha and Beta scaling
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LinearCombinationRelu {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static int const kCount = Count;
static const ScaleType::Kind kScale = Scale;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentCompute = Array<ElementCompute, kCount>;
using FragmentScaleBias = Array<ElementCompute, kCount>;
using FragmentSource = Array<ElementOutput, kCount>;
static FloatRoundStyle const kRound = Round;
static bool const kIsHeavy = detail::LinearCombinationReluIsHeavy();
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute threshold; ///< minimum value that is output
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
threshold(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta = ElementCompute(0),
ElementCompute threshold = ElementCompute(0)
): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr = nullptr,
ElementCompute threshold = ElementCompute(0)
): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
ElementCompute threshold_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationRelu(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
threshold_ = params.threshold;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::OnlyAlphaPerChannelScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
// set to NaN to make ReLU no-op for all except last k partitions
int64_t allones = -1;
threshold_ = reinterpret_cast<ElementCompute const &>(allones);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source) const {
    // Convert source and accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
ReLu<FragmentCompute> relu;
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
}
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_accumulator;
ReLu<FragmentCompute> relu;
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
}
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
/// Computes per-channel linear scaling and bias : D = scale * accumulator + bias
/// Scale and Bias are from input Fragment
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentScaleBias const &scale,
FragmentScaleBias const &bias) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform per-channel scale and bias
FragmentCompute intermediate;
multiply_add<FragmentCompute> mul_add_accumulator;
if(Scale == ScaleType::OnlyAlphaPerChannelScaling)
intermediate = mul_add_accumulator(scale, converted_accumulator, bias); // D = scale * Accum + bias
else
intermediate = mul_add_accumulator(alpha_, converted_accumulator, bias); // D = alpha * Accum + bias
ReLu<FragmentCompute> relu;
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
return destination_converter(intermediate);
}
};
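//
// Illustrative usage sketch. The element types and vector width below are example choices,
// not requirements of this header:
//
//   using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
//       cutlass::half_t,   // ElementOutput
//       8,                 // Count = 128 / sizeof_bits<half_t>::value
//       float,             // ElementAccumulator
//       float>;            // ElementCompute
//
//   EpilogueOp::Params params(/*alpha=*/1.0f, /*beta=*/0.0f, /*threshold=*/0.0f);
//   EpilogueOp relu_op(params);
//
//   // relu_op(accum_fragment, source_fragment) then yields
//   // D = max(alpha * Accum + beta * C, threshold) converted to the output type.
//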
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conditional guards to enable partial specialization for packed integers
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && ((__CUDACC_VER_MAJOR__ > 10) || ((__CUDACC_VER_MAJOR__ == 10) && (__CUDACC_VER_MINOR__ >= 2)))
/// Applies a linear combination operator followed by a ReLU to an array of elements.
///
/// D = max(alpha * accumulator + beta * source, threshold)
///
/// Special handling for integer accumulator and output types
template <
typename ElementOutput_, ///< Data type used to load and store tensors
int Count, ///< Number of elements computed per operation
ScaleType::Kind Scale, ///< Control Alpha and Beta scaling
FloatRoundStyle Round
>
class LinearCombinationRelu <ElementOutput_, Count, int, float, Scale, Round> {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = int;
using ElementCompute = float;
static bool const kIsHeavy = detail::LinearCombinationReluIsHeavy();
static int const kCount = Count;
static const ScaleType::Kind kScale = Scale;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using FragmentCompute = Array<ElementCompute, kCount>;
using FragmentScaleBias = Array<ElementCompute, kCount>;
using FragmentSource = Array<ElementOutput, kCount>;
static FloatRoundStyle const kRound = Round;
/// Host-constructable parameters structure
struct Params {
ElementCompute alpha; ///< scales accumulators
ElementCompute beta; ///< scales source tensor
ElementCompute threshold; ///< minimum value that is output
ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory
ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
alpha(ElementCompute(1)),
beta(ElementCompute(0)),
threshold(ElementCompute(0)),
alpha_ptr(nullptr),
beta_ptr(nullptr) { }
CUTLASS_HOST_DEVICE
Params(
ElementCompute alpha,
ElementCompute beta = ElementCompute(0),
ElementCompute threshold = ElementCompute(0)
): alpha(alpha), beta(beta), threshold(threshold), alpha_ptr(nullptr), beta_ptr(nullptr) {
}
CUTLASS_HOST_DEVICE
Params(
ElementCompute const *alpha_ptr,
ElementCompute const *beta_ptr = nullptr,
ElementCompute threshold = ElementCompute(0)
): alpha(0), beta(0), threshold(threshold), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr) {
}
};
private:
//
// Data members
//
ElementCompute alpha_;
ElementCompute beta_;
ElementCompute threshold_;
public:
/// Constructs the function object, possibly loading from pointers in host memory
CUTLASS_HOST_DEVICE
LinearCombinationRelu(Params const ¶ms) {
alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha);
beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta);
threshold_ = params.threshold;
}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
if (Scale == ScaleType::NoBetaScaling) return true;
if (Scale == ScaleType::OnlyAlphaScaling) return false;
if (Scale == ScaleType::OnlyAlphaPerChannelScaling) return false;
if (Scale == ScaleType::Nothing) return false;
return beta_ != ElementCompute(0);
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {
if (k_partition) {
beta_ = ElementCompute(1);
}
if (k_partition != k_partition_count - 1) {
// set to NaN to make ReLU no-op for all except last k partitions
int64_t allones = -1;
threshold_ = reinterpret_cast<ElementCompute const &>(allones);
}
}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentOutput const &source) const {
    // Convert source and accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementOutput, kCount, Round> source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_source = source_converter(source);
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_add_source;
multiply_add<FragmentCompute> mul_add_accumulator;
ReLu<FragmentCompute> relu;
if (Scale == ScaleType::NoBetaScaling) {
intermediate = converted_source;
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
} else if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_add_source(beta_, converted_source); // X = beta * C + uniform
intermediate = mul_add_accumulator(alpha_, converted_accumulator, intermediate); // D = alpha * Accum + X
}
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
if (cutlass::platform::numeric_limits<ElementOutput>::is_integer) {
// Convert floats back to INT
FragmentAccumulator scaled_accumulator;
NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter;
scaled_accumulator = compute_converter(intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, int, kCount, Round>
destination_converter;
return destination_converter(scaled_accumulator);
} else {
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
FragmentCompute intermediate;
multiplies<FragmentCompute> mul_accumulator;
ReLu<FragmentCompute> relu;
if (Scale == ScaleType::Nothing) {
intermediate = converted_accumulator;
} else {
intermediate = mul_accumulator(alpha_, converted_accumulator); // D = alpha * Accum
}
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
if (cutlass::platform::numeric_limits<ElementOutput>::is_integer) {
// Convert floats back to INT
FragmentAccumulator scaled_accumulator;
NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter;
scaled_accumulator = compute_converter(intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, int, kCount, Round>
destination_converter;
return destination_converter(scaled_accumulator);
} else {
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
}
/// Computes per-channel linear scaling and bias : D = scale * accumulator + bias
/// Scale and Bias are from input Fragment
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const &accumulator,
FragmentScaleBias const &scale,
FragmentScaleBias const &bias) const {
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter;
FragmentCompute converted_accumulator = accumulator_converter(accumulator);
// Perform per-channel scale and bias
FragmentCompute intermediate;
multiply_add<FragmentCompute> mul_add_accumulator;
if(Scale == ScaleType::OnlyAlphaPerChannelScaling)
intermediate = mul_add_accumulator(scale, converted_accumulator, bias); // D = scale * Accum + bias
else
intermediate = mul_add_accumulator(alpha_, converted_accumulator, bias); // D = alpha * Accum + bias
ReLu<FragmentCompute> relu;
// Compute threshold optionally
intermediate = relu(threshold_, intermediate);
if (cutlass::platform::numeric_limits<ElementOutput>::is_integer) {
// Convert floats back to INT
FragmentAccumulator scaled_accumulator;
NumericArrayConverter<int, ElementCompute, kCount, Round> compute_converter;
scaled_accumulator = compute_converter(intermediate);
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, int, kCount, Round>
destination_converter;
return destination_converter(scaled_accumulator);
} else {
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
return destination_converter(intermediate);
}
}
};
#endif // Conditional guards to enable partial specialization for packed integers
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/thread/linear_combination_relu.h/0 | {
"file_path": "include/cutlass/epilogue/thread/linear_combination_relu.h",
"repo_id": "include",
"token_count": 6866
} | 24 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops on Volta.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/linear_combination_sigmoid.h"
#include "cutlass/epilogue/thread/linear_combination_planar_complex.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h"
#include "cutlass/epilogue/threadblock/shared_load_iterator.h"
#include "cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h"
#include "cutlass/epilogue/warp/tile_iterator_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/layout/permute.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for Volta Tensor Ops.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess,
bool ScatterD = false,
typename PermuteDLayout = layout::NoPermute
>
struct DefaultEpilogueVoltaTensorOp {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess,
ElementAccumulator
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8;
static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B");
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator,
kSharedMemAlignment
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
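//
// Illustrative sketch of extracting the composed epilogue from these defaults. `WarpMma` stands
// for a warp-level Volta tensor-op MMA type defined elsewhere (for instance, one produced by the
// gemm::warp default MMA traits); it is an assumed name, not something defined in this file:
//
//   using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<
//       cutlass::half_t, 8, float, float>;
//
//   using DefaultEpilogue = cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
//       cutlass::gemm::GemmShape<128, 128, 32>,   // threadblock tile
//       WarpMma,                                  // warp-level MMA operator (assumed name)
//       1,                                        // PartitionsK
//       EpilogueOutputOp,
//       8>;                                       // elements per access
//
//   using Epilogue = typename DefaultEpilogue::Epilogue;
//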
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for Volta Tensor Ops used with the strided dgrad output tile iterator.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueVoltaTensorOpStridedDgrad {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess,
ElementAccumulator
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorStridedDgrad<
OutputTileThreadMap,
ElementOutput
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8;
static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B");
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator,
kSharedMemAlignment
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for Volta Tensor Ops with affine rank-N output layouts.
template <
int Rank,
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess
>
struct DefaultEpilogueVoltaTensorOpAffineRankN {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess,
ElementAccumulator
>::Type;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankN<
OutputTileThreadMap,
ElementOutput,
Rank
>;
using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
using WarpTileIterator = cutlass::epilogue::warp::TileIteratorVoltaTensorOp<
typename WarpMmaTensorOp::Shape,
gemm::GemmShape<32, 32, 4>,
ElementAccumulator,
LayoutC
>;
static int const kSharedMemAlignment = sizeof_bits<ElementAccumulator>::value * WarpTileIterator::kElementsPerAccess / 8;
static_assert(kSharedMemAlignment == 8, "Shared memory alignment must be 8B");
using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator<
typename OutputTileThreadMap::CompactedThreadMap,
ElementAccumulator,
kSharedMemAlignment
>;
/// Hard-coded padding elements added
using Padding = typename WarpTileIterator::Padding;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h",
"repo_id": "include",
"token_count": 3440
} | 25 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/array_planar_complex.h"
#include "cutlass/layout/vector.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
#include "cutlass/epilogue/threadblock/epilogue_base.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator for planar-complex output representations.
///
/// Note, as with most CUTLASS components for planar complex, the template arguments describe
/// the underlying real data type.
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
typename OutputOp_, ///< Output operator
typename Padding_ ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape)
>
class EpiloguePlanarComplex {
public:
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
/// Output layout is always row-major
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = ArrayPlanarComplex<
typename WarpMmaOperator::FragmentC::Element,
WarpMmaOperator::FragmentC::kElements
>;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
/// Shape of each warp-level operation
using WarpShape = typename WarpMmaOperator::Shape;
/// Number of warps
using WarpCount = gemm::GemmShape<
Shape::kM / WarpShape::kM,
Shape::kN / WarpShape::kN,
kPartitionsK
>;
/// Shared memory allocation
struct SharedStorage {
//
// Type definitions
//
/// Element type of shared memory
using Element = typename WarpTileIterator::Element;
/// Tensor reference to shared memory allocation
using TensorRef = typename WarpTileIterator::TensorRef;
/// Layout of shared memory allocation
using Layout = typename WarpTileIterator::Layout;
/// Logical shape of the shared memory tile written to by all warps.
using Shape = MatrixShape<
WarpCount::kM * WarpTileIterator::Shape::kRow * WarpCount::kK,
WarpCount::kN * WarpTileIterator::Shape::kColumn
>;
/// Shape of the shared memory allocation for the epilogue
using StorageShape = MatrixShape<
Shape::kRow + Padding::kRow,
Shape::kColumn + Padding::kColumn
>;
static int const kImaginaryStride = StorageShape::kCount;
//
// Data members
//
AlignedBuffer<Element, kImaginaryStride * 2> storage;
//
// Methods
//
/// Returns a pointer to the shared memory buffer
CUTLASS_DEVICE
Element *data() {
return storage.data();
}
/// Returns a tensor reference to the shared memory buffer
CUTLASS_DEVICE
TensorRef reference() {
return TensorRef(
storage.data(),
Layout::packed({StorageShape::kRow, StorageShape::kColumn}));
}
};
private:
//
// Data members
//
SharedStorage &shared_storage_;
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
/// Stores a warp's fragment of accumulators to SMEM
WarpTileIterator warp_tile_iterator_;
public:
/// Constructor
CUTLASS_DEVICE
EpiloguePlanarComplex(
SharedStorage &shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
):
shared_storage_(shared_storage),
shared_load_iterator_(shared_storage.reference(), thread_idx),
warp_tile_iterator_(shared_storage.reference(), lane_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to three coordinates:
//
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_k = warp_idx / (WarpCount::kM * WarpCount::kN);
int warp_mn = warp_idx % (WarpCount::kM * WarpCount::kN);
int warp_m = warp_mn % WarpCount::kM;
int warp_n = warp_mn / WarpCount::kM;
MatrixCoord warp_offset{warp_k * WarpCount::kM + warp_m, warp_n};
warp_tile_iterator_.add_tile_offset(warp_offset);
}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const &output_op, ///< Output operator
    OutputTileIterator destination_iterator_real, ///< Tile iterator for destination (real part)
    OutputTileIterator destination_iterator_imag, ///< Tile iterator for destination (imaginary part)
    AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile
    OutputTileIterator source_iterator_real, ///< Tile iterator for source tensor (real part)
    OutputTileIterator source_iterator_imag) { ///< Tile iterator for source tensor (imaginary part)
typename OutputTileIterator::Fragment source_fragment_real;
typename OutputTileIterator::Fragment source_fragment_imag;
if (!output_op.is_source_needed()) {
source_iterator_real.clear_mask();
source_iterator_imag.clear_mask();
}
source_fragment_real.clear();
source_fragment_imag.clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator_real(accumulators.real);
AccumulatorFragmentIterator accum_fragment_iterator_imag(accumulators.imag);
//
// Iterate over accumulator tile
//
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
//
// Load the source
//
source_iterator_real.load(source_fragment_real);
source_iterator_imag.load(source_fragment_imag);
++source_iterator_real;
++source_iterator_imag;
//
// Convert and store fragment
//
__syncthreads();
typename AccumulatorFragmentIterator::Fragment accum_fragment_real;
typename AccumulatorFragmentIterator::Fragment accum_fragment_imag;
accum_fragment_iterator_real.load(accum_fragment_real);
accum_fragment_iterator_imag.load(accum_fragment_imag);
++accum_fragment_iterator_real;
++accum_fragment_iterator_imag;
this->warp_tile_iterator_.store(accum_fragment_real);
this->warp_tile_iterator_.store_with_pointer_offset(accum_fragment_imag, SharedStorage::kImaginaryStride);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment aligned_accum_fragment_real[kPartitionsK];
typename SharedLoadIterator::Fragment aligned_accum_fragment_imag[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment_real[0]);
shared_load_iterator_.load_with_pointer_offset(aligned_accum_fragment_imag[0], SharedStorage::kImaginaryStride);
// If the number of k-slices is > 1 - perform a reduction amongst the k-slices
static_assert(kPartitionsK == 1, "Sliced-K not supported for planar complex at this time");
//
// Compute the output result
//
typename OutputTileIterator::Fragment output_fragment_real;
typename OutputTileIterator::Fragment output_fragment_imag;
apply_output_operator_(
output_fragment_real,
output_fragment_imag,
output_op,
aligned_accum_fragment_real[0],
aligned_accum_fragment_imag[0],
source_fragment_real,
source_fragment_imag);
//
// Store the final result
//
destination_iterator_real.store(output_fragment_real);
destination_iterator_imag.store(output_fragment_imag);
++destination_iterator_real;
++destination_iterator_imag;
}
}
private:
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
typename OutputTileIterator::Fragment &output_fragment_real,
typename OutputTileIterator::Fragment &output_fragment_imag,
OutputOp const &output_op, ///< Output operator
typename SharedLoadIterator::Fragment const &aligned_accum_fragment_real,
typename SharedLoadIterator::Fragment const &aligned_accum_fragment_imag,
typename OutputTileIterator::Fragment const &source_fragment_real,
typename OutputTileIterator::Fragment const &source_fragment_imag) {
OutputAccessType *output_frag_real_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment_real);
OutputAccessType *output_frag_imag_ptr =
reinterpret_cast<OutputAccessType *>(&output_fragment_imag);
AccumulatorAccessType const *compute_frag_real_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment_real);
AccumulatorAccessType const *compute_frag_imag_ptr =
reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment_imag);
OutputAccessType const *source_frag_real_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment_real);
OutputAccessType const *source_frag_imag_ptr =
reinterpret_cast<OutputAccessType const *>(&source_fragment_imag);
int const kOutputOpIterations =
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
auto result_fragment = output_op(
make_ArrayPlanarComplex(compute_frag_real_ptr[i], compute_frag_imag_ptr[i]),
make_ArrayPlanarComplex(source_frag_real_ptr[i], source_frag_imag_ptr[i])
);
output_frag_real_ptr[i] = result_fragment.real;
output_frag_imag_ptr[i] = result_fragment.imag;
}
}
};
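//
// Sketch of how a kernel might drive this epilogue once the component types have been composed
// into a concrete `Epilogue` type. Iterator construction is omitted and the names are
// illustrative rather than taken from a specific kernel:
//
//   __shared__ typename Epilogue::SharedStorage epilogue_storage;
//
//   Epilogue epilogue(epilogue_storage, thread_idx, warp_idx, lane_idx);
//
//   epilogue(
//     output_op,
//     iterator_D_real, iterator_D_imag,    // destination tile iterators
//     accumulators,                        // ArrayPlanarComplex accumulator tile
//     iterator_C_real, iterator_C_imag);   // source tile iterators
//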
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/threadblock/epilogue_planar_complex.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/epilogue_planar_complex.h",
"repo_id": "include",
"token_count": 4937
} | 26 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/tensor_ref.h"
namespace cutlass {
namespace epilogue {
namespace threadblock {
template<
typename TensorLayout_, ///! The original output tensor layout
typename OutputIteratorLayout_, ///! Layout used by epilogue output iterator
typename TensorRef_, ///! Input tensor to epilogue output iterator
conv::Operator ConvOperator, ///! Convolutional operator (Fprop, Dgrad, Wgrad)
typename ConvProblemSize_ ///! Convolutional operator on 2D or 3D problem
>
struct ConvOutputIteratorParameter {
using TensorLayout = TensorLayout_;
using OutputIteratorLayout = OutputIteratorLayout_;
using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord;
using TensorRef = TensorRef_;
static conv::Operator const kConvolutionalOperator = ConvOperator;
using ConvProblemSize = ConvProblemSize_;
/// Wgrad stride idx for implicit gemm algorithm
// Conv2d row-major matrix (KxRSC)
// Conv3d row-major matrix (KxTRSC)
static int const kWgradStrideIdx =
platform::is_same<TensorLayout, layout::TensorNHWC>::value ? 2 : 3;
/// This chooses the appropriate stride element of the C tensor.
static int const kTensorStrideIdx =
(kConvolutionalOperator == conv::Operator::kWgrad ? kWgradStrideIdx : 0);
CUTLASS_HOST_DEVICE
static OutputIteratorLayout layout(const TensorRef & ref) {
return ref.stride(kTensorStrideIdx);
}
CUTLASS_HOST_DEVICE
static OutputTensorCoord extent(ConvProblemSize problem_size) {
return conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn();
}
};
template<
typename TensorRef_, ///! Input tensor to epilogue output iterator
typename ConvProblemSize_ ///! Convolutional operator on 2D or 3D problem
>
struct ConvOutputIteratorParameter<layout::TensorNHWC, layout::TensorNHWC, TensorRef_, conv::Operator::kFprop, ConvProblemSize_> {
using TensorLayout = layout::TensorNHWC;
using OutputIteratorLayout = layout::TensorNHWC;
using MappedLayout = layout::RowMajor;
using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord;
using MappedTensorCoord = typename MappedLayout::TensorCoord;
using TensorRef = TensorRef_;
static conv::Operator const kConvolutionalOperator = conv::Operator::kFprop;
using ConvProblemSize = ConvProblemSize_;
CUTLASS_HOST_DEVICE
static OutputIteratorLayout layout(const TensorRef & ref) {
return ref.stride();
}
CUTLASS_HOST_DEVICE
static MappedTensorCoord extent(ConvProblemSize problem_size) {
return conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn();
}
};
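//
// Illustrative usage for a 2-D fprop problem with NHWC output. The element type, `ref_D`, and
// `problem_size` below are example names, not defined in this header:
//
//   using OutputIteratorParameter = cutlass::epilogue::threadblock::ConvOutputIteratorParameter<
//       cutlass::layout::TensorNHWC,                                       // tensor layout
//       cutlass::layout::TensorNHWC,                                       // output iterator layout
//       cutlass::TensorRef<cutlass::half_t, cutlass::layout::TensorNHWC>,  // output tensor ref
//       cutlass::conv::Operator::kFprop,
//       cutlass::conv::Conv2dProblemSize>;
//
//   auto iterator_layout = OutputIteratorParameter::layout(ref_D);          // output tensor stride
//   auto iterator_extent = OutputIteratorParameter::extent(problem_size);   // implicit GEMM M x N
//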
template<
typename TensorRef_, ///! Input tensor to epilogue output iterator
typename ConvProblemSize_ ///! Convolutional operator on 2D or 3D problem
>
struct ConvOutputIteratorParameter<layout::TensorNHWC, layout::TensorNHWC, TensorRef_, conv::Operator::kDeconv, ConvProblemSize_> {
using TensorLayout = layout::TensorNHWC;
using OutputIteratorLayout = layout::TensorNHWC;
using MappedLayout = layout::RowMajor;
using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord;
using MappedTensorCoord = typename MappedLayout::TensorCoord;
using TensorRef = TensorRef_;
static conv::Operator const kConvolutionalOperator = conv::Operator::kDeconv;
using ConvProblemSize = ConvProblemSize_;
CUTLASS_HOST_DEVICE
static OutputIteratorLayout layout(const TensorRef & ref) {
return ref.stride();
}
CUTLASS_HOST_DEVICE
static MappedTensorCoord extent(ConvProblemSize problem_size) {
return conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn();
}
};
template<
typename TensorRef_, ///! Input tensor to epilogue output iterator
typename ConvProblemSize_ ///! Convolutional operator on 2D or 3D problem
>
struct ConvOutputIteratorParameter<layout::TensorNDHWC, layout::TensorNDHWC, TensorRef_, conv::Operator::kFprop, ConvProblemSize_> {
using TensorLayout = layout::TensorNDHWC;
using OutputIteratorLayout = layout::TensorNDHWC;
using MappedLayout = layout::RowMajor;
using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord;
using MappedTensorCoord = typename MappedLayout::TensorCoord;
using TensorRef = TensorRef_;
static conv::Operator const kConvolutionalOperator = conv::Operator::kFprop;
using ConvProblemSize = ConvProblemSize_;
CUTLASS_HOST_DEVICE
static OutputIteratorLayout layout(const TensorRef & ref) {
return ref.stride();
}
CUTLASS_HOST_DEVICE
static MappedTensorCoord extent(ConvProblemSize problem_size) {
return conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn();
}
};
template<
typename TensorRef_, ///! Input tensor to epilogue output iterator
typename ConvProblemSize_ ///! Convolutional operator on 2D or 3D problem
>
struct ConvOutputIteratorParameter<layout::TensorNDHWC, layout::TensorNDHWC, TensorRef_, conv::Operator::kDeconv, ConvProblemSize_> {
using TensorLayout = layout::TensorNDHWC;
using OutputIteratorLayout = layout::TensorNDHWC;
using MappedLayout = layout::RowMajor;
using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord;
using MappedTensorCoord = typename MappedLayout::TensorCoord;
using TensorRef = TensorRef_;
static conv::Operator const kConvolutionalOperator = conv::Operator::kDeconv;
using ConvProblemSize = ConvProblemSize_;
CUTLASS_HOST_DEVICE
static OutputIteratorLayout layout(const TensorRef & ref) {
return ref.stride();
}
CUTLASS_HOST_DEVICE
static MappedTensorCoord extent(ConvProblemSize problem_size) {
return conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn();
}
};
template <
int InterleavedK,
typename TensorRef_,
conv::Operator ConvOperator,
typename ConvProblemSize_
>
struct ConvOutputIteratorParameter<
layout::TensorNCxHWx<InterleavedK>,
layout::TensorNCxHWx<InterleavedK>,
TensorRef_,
ConvOperator,
ConvProblemSize_>
{
using TensorLayout = typename layout::TensorNCxHWx<InterleavedK>;
using OutputIteratorLayout = typename layout::TensorNCxHWx<InterleavedK>;
using OutputTensorCoord = typename OutputIteratorLayout::TensorCoord;
using TensorRef = TensorRef_;
static conv::Operator const kConvolutionalOperator = ConvOperator;
using ConvProblemSize = ConvProblemSize_;
CUTLASS_HOST_DEVICE
static OutputIteratorLayout layout(const TensorRef & ref) {
return ref.stride();
}
CUTLASS_HOST_DEVICE
static OutputTensorCoord extent(ConvProblemSize problem_size) {
return problem_size.output_extent();
}
};
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
| include/cutlass/epilogue/threadblock/output_iterator_parameter.h/0 | {
"file_path": "include/cutlass/epilogue/threadblock/output_iterator_parameter.h",
"repo_id": "include",
"token_count": 3056
} | 27 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile
that participate in one warp-level store operation.
Typically, the accumulator tile is the largest single block of register-backed storage
within the kernel. Storing it to memory is best accomplished by partitioning it into
smaller tiles and storing these sequentially.
Round trips through shared memory during the Epilogue phase require partitioning, as
shared memory capacity is typically insufficient for a threadblock's total accumulator
size.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/simt_policy.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Fragment iterator for SIMT accumulator arrangements
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename Operator, ///< matrix multiply operation (concept: arch::Mma)
typename Layout, ///< target shared memory layout
typename MmaSimtPolicy ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class FragmentIteratorSimt;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major shared memory
template <
typename WarpShape_, ///< shape of the warp-level GEMM tile
typename Operator_ , ///< matrix multiply operator (concept: arch::Mma)
typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy)
>
class FragmentIteratorSimt<WarpShape_, Operator_, layout::RowMajor, MmaSimtPolicy_> {
public:
using WarpShape = WarpShape_;
using Operator = Operator_;
using Layout = layout::RowMajor;
/// Policy for warp-level epilogue components
using Policy = SimtPolicy<WarpShape, Operator, Layout, MmaSimtPolicy_>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
typename Operator::ElementC,
Policy::kElementsPerIteration>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
typename Operator::ElementC,
Policy::kAccumulatorElementCount>;
using OutputAccumulatorTile = AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
private:
/// Internal access type
using AccessType = Array<typename Operator::ElementC, Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
FragmentIteratorSimt(AccumulatorTile const &accum):
accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0) {
}
/// Increments
CUTLASS_HOST_DEVICE
FragmentIteratorSimt &operator++() {
++index_;
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
FragmentIteratorSimt &operator--() {
--index_;
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, int index_offset = 0) const {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::kAccessesPerIteration; ++n) {
int accumulator_access_offset = index_ * Policy::kAccessesPerIteration + n;
frag_ptr[n] = accumulators_[accumulator_access_offset];
}
}
};
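//
// Sketch of the usual iteration pattern over a SIMT accumulator tile. `FragmentIterator` is
// assumed to be a concrete instantiation of FragmentIteratorSimt and `warp_tile_iterator` an
// instance of the matching warp tile iterator, both typically supplied by the threadblock
// epilogue defaults:
//
//   FragmentIterator accum_fragment_iterator(accumulators);
//
//   CUTLASS_PRAGMA_UNROLL
//   for (int iter = 0; iter < FragmentIterator::kIterations; ++iter) {
//     typename FragmentIterator::Fragment accum_fragment;
//     accum_fragment_iterator.load(accum_fragment);
//     ++accum_fragment_iterator;
//     warp_tile_iterator.store(accum_fragment);
//   }
//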
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/epilogue/warp/fragment_iterator_simt.h/0 | {
"file_path": "include/cutlass/epilogue/warp/fragment_iterator_simt.h",
"repo_id": "include",
"token_count": 1642
} | 28 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Define basic numeric operators
This is inspired by the Standard Library's <functional> header.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/platform/platform.h"
#if defined(__CUDACC_RTC__)
#include "cutlass/floating_point_nvrtc.h"
#endif
#include <cuda_runtime.h>
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include <mma.h>
#endif // defined(CUTLASS_ARCH_WMMA_ENABLED)
#ifdef _MSC_VER
// Provides support for alternate operators such as 'and', 'or', ...
#include <iso646.h>
#endif // _MSC_VER
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct absolute_value_op {
CUTLASS_HOST_DEVICE
T operator()(T lhs) const {
return abs(lhs);
}
};
template <>
struct absolute_value_op<float> {
CUTLASS_HOST_DEVICE
float operator()(float lhs) const { return fabs(lhs); }
};
template <typename T>
struct plus {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
lhs += rhs;
return lhs;
}
};
template <typename T>
struct minus {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
lhs -= rhs;
return lhs;
}
};
template <typename T>
struct multiplies {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
lhs *= rhs;
return lhs;
}
};
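//
// These functors mirror the Standard Library's arithmetic function objects and compose the same
// way in host or device code. A minimal sketch:
//
//   cutlass::plus<float>       add;
//   cutlass::multiplies<float> mul;
//
//   float y = add(mul(2.0f, 3.0f), 1.0f);   // y == 7.0f
//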
template <typename T>
struct scale {
T const scaling_factor_;
CUTLASS_HOST_DEVICE
scale(float scaling_factor) : scaling_factor_(scaling_factor) {
}
  CUTLASS_HOST_DEVICE
  T operator()(T const &rhs) const {
T result = rhs * scaling_factor_;
return result;
}
};
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530
/// Partial specializations needed when __CUDA_NO_HALF2_OPERATORS__ is set
template<>
struct plus<__half2> {
CUTLASS_HOST_DEVICE
__half2 operator()(__half2 lhs, __half2 const &rhs) const {
return __hadd2(lhs, rhs);
}
};
template<>
struct minus<__half2> {
CUTLASS_HOST_DEVICE
__half2 operator()(__half2 lhs, __half2 const &rhs) const {
return __hsub2(lhs, rhs);
}
};
template<>
struct multiplies<__half2> {
CUTLASS_HOST_DEVICE
__half2 operator()(__half2 lhs, __half2 const &rhs) const {
return __hmul2(lhs, rhs);
}
};
/// Partial specializations needed when __CUDA_NO_HALF_OPERATORS__ is set
template<>
struct plus<__half> {
CUTLASS_HOST_DEVICE
__half operator()(__half lhs, __half const &rhs) const {
return __hadd(lhs, rhs);
}
};
template<>
struct minus<__half> {
CUTLASS_HOST_DEVICE
__half operator()(__half lhs, __half const &rhs) const {
return __hsub(lhs, rhs);
}
};
template<>
struct multiplies<__half> {
CUTLASS_HOST_DEVICE
__half operator()(__half lhs, __half const &rhs) const {
return __hmul(lhs, rhs);
}
};
#endif // defined(__CUDA_ARCH__)
/// Squares with optional conversion
template <typename T, typename Output = T>
struct square {
CUTLASS_HOST_DEVICE
Output operator()(T lhs) const {
multiplies<Output> mul_op;
Output y = Output(lhs);
return mul_op(y, y);
}
};
/// Returns the magnitude squared of an element.
template <typename T, typename Output = T>
struct magnitude_squared {
CUTLASS_HOST_DEVICE
Output operator()(T lhs) const {
multiplies<Output> mul_op;
Output y = Output(lhs);
return mul_op(y, y);
}
};
/// Computes the square of a difference with optional conversion
template <typename T, typename Output = T>
struct square_difference {
CUTLASS_HOST_DEVICE
Output operator()(T lhs, T rhs) const {
multiplies<Output> mul_op;
Output y = Output(lhs) - Output(rhs);
return mul_op(y, y);
}
};
/// Computes the magnitude squared of a difference with optional conversion
template <typename T, typename Output = T>
struct magnitude_squared_difference {
CUTLASS_HOST_DEVICE
Output operator()(T lhs, T rhs) const {
multiplies<Output> mul_op;
Output y = Output(lhs) - Output(rhs);
return mul_op(y, y);
}
};
// Computes the reciprocal square root
template <typename T>
struct inverse_square_root;
template <>
struct inverse_square_root<float> {
CUTLASS_HOST_DEVICE
float operator()(float const &lhs) const {
#if defined(__CUDA_ARCH__)
return rsqrtf(lhs);
#else
return 1.f / std::sqrt(lhs);
#endif
}
};
template <>
struct inverse_square_root<half_t> {
CUTLASS_HOST_DEVICE
half_t operator()(half_t const &lhs) const {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ > 520
auto result = hrsqrt(reinterpret_cast<__half const &>(lhs));
return reinterpret_cast<half_t const &>(result);
#else
return half_t(1.f / std::sqrt(half_t::convert(lhs)));
#endif
}
};
/// Divides
template <typename T>
struct divides {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
lhs /= rhs;
return lhs;
}
};
/// reciprocal_approximate
template <typename T>
struct reciprocal_approximate {
CUTLASS_HOST_DEVICE
T operator()(T lhs) const {
return divides<T>{}(T(1), lhs);
}
};
template <>
struct reciprocal_approximate <float> {
CUTLASS_HOST_DEVICE
float operator()(float lhs) const {
float ret;
#if defined(__CUDA_ARCH__)
asm volatile ("rcp.approx.f32 %0, %1;\n" : "=f"(ret) : "f"(lhs));
#else
ret = 1.0f / lhs;
#endif
return ret;
}
};
/// reciprocal_approximate with ftz
template<typename T>
struct reciprocal_approximate_ftz : reciprocal_approximate<T>
{};
template <>
struct reciprocal_approximate_ftz <float> {
CUTLASS_HOST_DEVICE
float operator()(float lhs) const {
float ret;
#if defined(__CUDA_ARCH__)
asm volatile ("rcp.approx.ftz.f32 %0, %1;\n" : "=f"(ret) : "f"(lhs));
#else
if (std::fpclassify(lhs) == FP_SUBNORMAL) {
lhs = 0.0f;
}
ret = 1.0f / lhs;
if (std::fpclassify(ret) == FP_SUBNORMAL) {
ret = 0.0f;
}
#endif
return ret;
}
};
/// Negate
template <typename T>
struct negate {
CUTLASS_HOST_DEVICE
T operator()(T lhs) const {
return -lhs;
}
};
/// Greater equal
template <typename T>
struct greater_equal {
CUTLASS_HOST_DEVICE
bool operator()(T const &lhs, T const &rhs) const {
return (lhs >= rhs);
}
};
/// Greater
template <typename T>
struct greater {
CUTLASS_HOST_DEVICE
bool operator()(T const &lhs, T const &rhs) const {
return (lhs > rhs);
}
};
/// Less equal
template <typename T>
struct less_equal {
CUTLASS_HOST_DEVICE
bool operator()(T const &lhs, T const &rhs) const {
return (lhs <= rhs);
}
};
/// Less
template <typename T>
struct less {
CUTLASS_HOST_DEVICE
bool operator()(T const &lhs, T const &rhs) const {
return (lhs < rhs);
}
};
template <typename T, bool PropagateNaN = false>
struct maximum {
CUTLASS_HOST_DEVICE
T operator()(T const &lhs, T const &rhs) const {
return (lhs < rhs ? rhs : lhs);
}
};
// This is a subclass and not an alias
// in order to work around a known Clang issue,
// where a template template parameter with one template parameter
// does not match classes that take multiple template parameters
// but have defaults for all but the first.
template<typename T>
struct maximum_with_default_nan_propagation : public maximum<T>
{};
// Maximum with nan propagation
// To propagate NaNs, the "max" of two elements should return a NaN whenever either input is a NaN
template <typename T>
struct maximum<T, true> {
CUTLASS_HOST_DEVICE
T operator()(T const &lhs, T const &rhs) const {
using CUTLASS_CMATH_NAMESPACE :: isnan;
// Call isnan unqualified, so argument-dependent lookup (ADL)
// will find overloads such as cutlass::isnan(half_t).
// Calling ::isnan or std::isnan directly would force
// implicit conversions to float of custom number types
// in the cutlass namespace (e.g., cutlass::half_t).
return lhs > rhs || isnan(lhs) ? lhs : rhs;
}
};
template <>
struct maximum<float, false> {
CUTLASS_HOST_DEVICE
float operator()(float const &lhs, float const &rhs) const {
return fmaxf(lhs, rhs);
}
};
template <>
struct maximum<float, true> {
CUTLASS_HOST_DEVICE
float operator()(float const lhs, float const rhs) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
float res;
asm volatile("max.NaN.f32 %0, %1, %2;\n" : "=f"(res) : "f"(lhs), "f"(rhs));
return res;
#else
using CUTLASS_CMATH_NAMESPACE :: isnan;
return lhs > rhs || isnan(lhs) ? lhs : rhs;
#endif
}
};
// This is a subclass and not an alias
// in order to work around a known Clang issue,
// where a template template parameter with one template parameter
// does not match classes that take multiple template parameters
// but have defaults for all but the first.
template <typename T>
struct maximum_with_nan_propagation : maximum<T, true>
{};
// This alias exists for backwards compatibility only.
// Please use the correctly spelled class template above.
template <typename T>
using maximum_with_nan_propogation = maximum_with_nan_propagation<T>;
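// Illustrative sketch (not part of the original header): how the NaN policy changes
// the result. Host-side example; NAN comes from <cmath>.
//
//   cutlass::maximum<float, false> max_fast;  // maps to fmaxf: NaN inputs are ignored
//   cutlass::maximum<float, true>  max_nan;   // propagates NaN inputs
//
//   float a = NAN, b = 1.0f;
//   max_fast(a, b);                                         // 1.0f
//   max_nan(a, b);                                          // NaN
//   cutlass::maximum_with_nan_propagation<float>{}(a, b);   // NaN (same policy as max_nan)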
template <typename T, bool PropagateNaN = false>
struct minimum{
CUTLASS_HOST_DEVICE
T operator()(T const &lhs, T const &rhs) const {
return (rhs < lhs ? rhs : lhs);
}
};
template <typename T>
struct minimum<T, true> {
CUTLASS_HOST_DEVICE
T operator()(T const &lhs, T const &rhs) const {
using CUTLASS_CMATH_NAMESPACE :: isnan;
return lhs < rhs || isnan(lhs) ? lhs : rhs;
}
};
template <>
struct minimum<float, false> {
CUTLASS_HOST_DEVICE
float operator()(float const &lhs, float const &rhs) const {
return fminf(lhs, rhs);
}
};
template <typename T>
struct minimum_with_nan_propagation : minimum<T, true>
{};
template <typename T, bool PropagateNaN = false>
struct maximum_absolute_value {
CUTLASS_HOST_DEVICE
float operator()(T const &lhs, T const &rhs) const {
absolute_value_op<T> abs_op;
maximum<T, PropagateNaN> max_op;
return max_op(abs_op(lhs), abs_op(rhs));
}
};
// assumes the left operand is already an absolute value
template <typename T, bool PropagateNaN = false>
struct maximum_absolute_value_reduction {
CUTLASS_HOST_DEVICE
float operator()(T const &lhs, T const &rhs) const {
absolute_value_op<T> abs_op;
maximum<T, PropagateNaN> max_op;
return max_op(lhs, abs_op(rhs));
}
};
/// Fused multiply-add
template <typename A, typename B = A, typename C = A>
struct multiply_add {
CUTLASS_HOST_DEVICE
C operator()(A const &a, B const &b, C const &c) const {
return C(a) * C(b) + c;
}
};
template <typename T>
struct square_and_plus {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
multiply_add<T> multiply_add_op;
return multiply_add_op(rhs, rhs, lhs);
}
};
// Fused multiply-add that takes exactly one template parameter.
// This is useful for working around a known Clang issue,
// where a template template parameter with one template parameter
// does not match classes that take multiple template parameters
// but have defaults for all but the first.
template <typename A>
struct homogeneous_multiply_add : public multiply_add<A, A, A>
{};
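// Illustrative sketch (hypothetical, not from CUTLASS) of why the one-parameter
// wrappers above exist. A consumer written against a template template parameter
// with exactly one type parameter, e.g.
//
//   template <template <class> class Op, class T>
//   CUTLASS_HOST_DEVICE T apply_binary(T a, T b) {
//     return Op<T>{}(a, b);
//   }
//
// may be rejected by some compilers when instantiated as
// apply_binary<maximum, float>(x, y), because maximum takes <T, bool> with a default.
// Passing the single-parameter subclass instead,
// apply_binary<maximum_with_default_nan_propagation, float>(x, y), sidesteps the issue;
// homogeneous_multiply_add plays the same role for multiply_add<A, B, C>.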
/// Fused multiply-add
template <typename A, typename B = A, typename C = A>
struct multiply_add_relu0 {
CUTLASS_HOST_DEVICE
C operator()(A const &a, B const &b, C const &c) const {
maximum<C> mx;
return mx(C(a) * C(b) + c, C(0));
}
};
/// Guarded-multiply-add
template <typename A, typename B = A, typename C = A>
struct guarded_multiply_add {
CUTLASS_HOST_DEVICE
C operator()(A const &a, B const &b, C const &c) const {
using CUTLASS_CMATH_NAMESPACE :: isnan;
if (isnan(a) || isnan(b)) {
return C(0);
}
return C(a) * C(b) + c;
}
};
/// Guarded-multiply-add
template <>
struct guarded_multiply_add<half_t, half_t, half_t> {
CUTLASS_HOST_DEVICE
half_t operator()(half_t const &a, half_t const &b, half_t const &c) const {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 900
half_t result;
asm ("fma.rn.oob.f16 %0, %1, %2, %3;\n"
: "=h"(*reinterpret_cast<uint16_t*>(&result))
: "h"(*reinterpret_cast<uint16_t const*>(&a)), "h"(*reinterpret_cast<uint16_t const*>(&b)), "h"(*reinterpret_cast<uint16_t const*>(&c)));
return result;
#else
// Namespace-qualifying isnan as cutlass::isnan saves the compiler
// the trouble of argument-dependent lookup. Calling std::isnan or
// ::isnan here would result in unwanted implicit conversion to float.
if (cutlass::isnan(a) || cutlass::isnan(b)) {
return half_t(0);
}
return a * b + c;
#endif
}
};
/// Guarded-multiply-add-relu0
template <typename A, typename B = A, typename C = A>
struct guarded_multiply_add_relu0 {
CUTLASS_HOST_DEVICE
C operator()(A const &a, B const &b, C const &c) const {
using CUTLASS_CMATH_NAMESPACE :: isnan;
if (isnan(a) || isnan(b)) {
return C(0);
}
maximum<C> mx;
return mx(C(a) * C(b) + c, C(0));
}
};
template <>
struct guarded_multiply_add_relu0<half_t, half_t, half_t> {
CUTLASS_HOST_DEVICE
half_t operator()(half_t const &a, half_t const &b, half_t const &c) const {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 900
half_t result;
asm ("fma.rn.oob.relu.f16 %0, %1, %2, %3;\n"
: "=h"(*reinterpret_cast<uint16_t*>(&result))
: "h"(*reinterpret_cast<uint16_t const*>(&a)), "h"(*reinterpret_cast<uint16_t const*>(&b)), "h"(*reinterpret_cast<uint16_t const*>(&c)));
return result;
#else
if (cutlass::isnan(a) || cutlass::isnan(b)) {
return half_t(0);
}
maximum<half_t> mx;
return mx(a * b + c, half_t(0));
#endif
}
};
/// Fused bitwise-AND and add: computes (a & b) + c
template <typename T>
struct and_add {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b, T const &c) const {
return ((a & b) + c);
}
};
/// Fused bitwise-XOR and add: computes (a ^ b) + c
template <typename T>
struct xor_add {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b, T const &c) const {
return ((a ^ b) + c);
}
};
namespace detail {
// Whether namespace-unqualified conj(t) for t of type T is
// well-formed. This says whether the compiler can find
// namespace-unqualified conj(T) via argument-dependent lookup.
// If so, then CUTLASS assumes that conj(t) returns
// the complex conjugate of t.
template <typename T, typename Enable = void>
struct has_unqualified_conj : cutlass::platform::false_type
{};
template<typename T>
struct has_unqualified_conj<
T,
decltype(conj(cutlass::platform::declval<T>()), void())
> : cutlass::platform::true_type
{};
template <typename T>
constexpr bool has_unqualified_conj_v = has_unqualified_conj<T>::value;
} // namespace detail
// forward declaration (needed for conjugate below)
template<class T>
CUTLASS_HOST_DEVICE T conj(T const& z);
namespace detail {
// Whether cutlass::conj(t) for t of type T is well-formed.
// If so, then CUTLASS assumes that cutlass::conj(t)
// returns the complex conjugate of t.
template <typename T, typename Enable = void>
struct has_cutlass_conj : cutlass::platform::false_type
{};
template<typename T>
struct has_cutlass_conj<
T,
decltype(cutlass::conj(cutlass::platform::declval<T>()), void())
> : cutlass::platform::true_type
{};
template <typename T>
constexpr bool has_cutlass_conj_v = has_cutlass_conj<T>::value;
} // namespace detail
// Return the complex conjugate of the input.
//
// If the struct hasn't already been specialized for type T, then
//
// 1. for arithmetic types, return z;
//
// 2. for types where either (namespace-unqualified) conj(z) or
// cutlass::conj(z) is well formed, declare "using cutlass::conj;"
// and return conj(z); and
//
// 3. for everything else, return z.
//
// Regarding (1), the C++ Standard Library makes std::conj always
// return std::complex, even for (noncomplex) arithmetic types.
// cutlass::conj(T t) needs to return type T. This follows the
// convention of linear algebra software like the BLAS, where
// "conjugate transpose" means the same thing as "transpose" for a
// matrix of noncomplex numbers.
//
// Case (2) covers std::complex, cuda::std::complex, and non-Standard
// (including user-defined) complex number types (for which "conj(z)"
// is findable via argument-dependent lookup). cutlass::conj has a
// totally generic overload, but a more type-specific overload in any
// namespace will take precedence.
//
// Case (3) covers non-Standard non-complex number types.
//
// Users should not generally need to specialize this struct for their
// own custom complex or noncomplex types. The idiomatic way to
// identify a type T as "complex" is to make namespace-unqualified
// calls to conj(T) findable via argument-dependent lookup.
template <typename T>
struct conjugate {
CUTLASS_HOST_DEVICE
T operator()(T const& z) const {
if constexpr (cutlass::platform::is_arithmetic_v<T>) {
return z;
}
else if constexpr (detail::has_unqualified_conj_v<T> || detail::has_cutlass_conj_v<T>) {
using cutlass::conj;
return conj(z);
}
else {
return z;
}
}
};
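// Illustrative sketch (not part of the original header) of the dispatch rules above.
// Assumes cutlass/complex.h is available so cutlass::complex and its conj() overload exist.
//
//   cutlass::conjugate<float> conj_real;
//   conj_real(3.0f);                              // arithmetic type: returns 3.0f unchanged
//
//   cutlass::conjugate<cutlass::complex<float>> conj_cplx;
//   conj_cplx({1.0f, 2.0f});                      // conj() found via lookup: returns {1.0f, -2.0f}
//
// A user-defined type opts in simply by providing a namespace-level conj(T) that
// argument-dependent lookup can find; no specialization of conjugate is required.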
template <typename T>
struct first {
CUTLASS_HOST_DEVICE
T operator()(T const & first, T const &...) const {
return first;
}
CUTLASS_HOST_DEVICE
T operator()(T const & first) const {
return first;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct logical_and {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return ((static_cast<bool>(a) && static_cast<bool>(b)) ? T(1) : T());
}
};
template <typename T>
struct logical_or {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return ((static_cast<bool>(a) || static_cast<bool>(b)) ? T(1) : T());
}
};
template <typename T>
struct logical_not {
CUTLASS_HOST_DEVICE
T operator()(T const &a) const {
return T(!(a));
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct bit_and {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return a & b;
}
};
template <typename T>
struct bit_or {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return a | b;
}
};
template <typename T>
struct bit_not {
CUTLASS_HOST_DEVICE
T operator()(T const &a) const {
return ~a;
}
};
template <typename T>
struct bit_xor {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return a ^ b;
}
};
//////////////////////////////////////////////////////////////////////////////////////////////////
/// Atomic reductions
template <typename T>
struct atomic_add
{
CUTLASS_DEVICE
void operator()(T *ptr, const T &data)
{
#if defined(__CUDA_ARCH__)
atomicAdd(ptr, data);
#else
CUTLASS_UNUSED(ptr);
CUTLASS_UNUSED(data);
CUTLASS_NOT_IMPLEMENTED();
#endif
}
};
template<>
struct atomic_add<double>
{
CUTLASS_DEVICE
void operator()(double *ptr, const double &data)
{
#if !defined(__CUDA_ARCH__)
CUTLASS_UNUSED(ptr);
CUTLASS_UNUSED(data);
CUTLASS_NOT_IMPLEMENTED();
#elif (__CUDA_ARCH__ >= 600)
atomicAdd(ptr, data);
#else
// Use CAS loop
unsigned long long int* ptr_int = reinterpret_cast<unsigned long long int*>(ptr);
unsigned long long int old_int = *ptr_int;
unsigned long long int assumed_int;
do {
double update = data + __longlong_as_double(old_int);
assumed_int = old_int;
old_int = atomicCAS(ptr_int, assumed_int, __double_as_longlong(update));
} while (assumed_int != old_int);
#endif // (__CUDA_ARCH__ >= 600)
}
};
template<>
struct atomic_add<half2>
{
CUTLASS_DEVICE
void operator()(half2 *ptr, const half2 &data)
{
#if !defined(__CUDA_ARCH__) || (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600))
CUTLASS_UNUSED(ptr);
CUTLASS_UNUSED(data);
CUTLASS_NOT_IMPLEMENTED();
#else
// Vector-2 atomic reduction requires .target sm_60 or higher
uint32_t word = reinterpret_cast<const uint32_t&>(data);
asm volatile ("red.gpu.global.add.noftz.f16x2 [%0], %1;\n" : : "l"(ptr), "r"(word));
#endif // (__CUDA_ARCH__ >= 600)
}
};
template <typename T>
using red [[deprecated("use atomic_add instead")]] = atomic_add<T>;
template <typename T>
struct atomic_maximum {
CUTLASS_DEVICE
T operator()(T *ptr, T value) const {
#if defined(__CUDA_ARCH__)
return atomicMax(ptr, value);
#else
CUTLASS_UNUSED(ptr);
CUTLASS_UNUSED(value);
CUTLASS_NOT_IMPLEMENTED();
return 0;
#endif
}
};
template <>
struct atomic_maximum<float> {
CUTLASS_DEVICE
float operator()(float *ptr, float value) const {
#if defined(__CUDA_ARCH__)
// In device code, make sure that we do NOT try to use
// std::signbit, as that won't work if building with NVRTC.
// Instead, prefix "::" to call signbit from the global namespace,
// which CUDA guarantees to work in device code without including
// any headers.
//
return ! ::signbit(value) ?
__int_as_float(atomicMax((int*)ptr, __float_as_int(value))) :
__uint_as_float(atomicMin((unsigned int*)ptr, __float_as_uint(value)));
#else
CUTLASS_UNUSED(ptr);
CUTLASS_UNUSED(value);
CUTLASS_NOT_IMPLEMENTED();
return 0;
#endif
}
};
// is_atomic
template <class Fn>
struct is_atomic : platform::false_type {};
template <class T>
struct is_atomic<atomic_add<T>> : platform::true_type {};
template <class T>
struct is_atomic<atomic_maximum<T>> : platform::true_type {};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for nvcuda::wmma::fragment<Use, m, n, k, T, Layout>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
template<typename Use, int m, int n, int k, typename T, typename Layout>
struct plus<nvcuda::wmma::fragment<Use, m, n, k, T, Layout>>
{
using Fragment = nvcuda::wmma::fragment<Use, m, n, k, T, Layout>;
using ElementType = typename Fragment::element_type;
CUTLASS_HOST_DEVICE
Fragment operator()(Fragment const &lhs, Fragment const &rhs) const
{
Fragment result;
plus<ElementType> scalar_op;
ElementType *result_elts = reinterpret_cast<ElementType*>(&result);
const ElementType *lhs_elts = reinterpret_cast<const ElementType*>(&lhs);
const ElementType *rhs_elts = reinterpret_cast<const ElementType*>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Fragment::num_elements; i++) {
result_elts[i] = scalar_op(lhs_elts[i], rhs_elts[i]);
}
return result;
}
};
#endif // defined(CUTLASS_ARCH_WMMA_ENABLED)
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/functional.h/0 | {
"file_path": "include/cutlass/functional.h",
"repo_id": "include",
"token_count": 9214
} | 29 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for GEMM performing a reduction over K partitions in parallel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/device_kernel.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/gemm.h"
#include "cutlass/gemm/kernel/default_gemm_splitk_parallel.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/reduction/kernel/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
////////////////////////////////////////////////////////////////////////////////
/*!
Gemm device-level operator performing parallel reduction over the K partition.
*/
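/*
  Illustrative usage sketch (not part of the original header). The element types,
  layouts, problem dimensions, and the device_memory helper below are assumptions
  chosen for the example; any concrete instantiation follows the same
  Arguments -> workspace -> initialize/run flow.

    using Gemm = cutlass::gemm::device::GemmSplitKParallel<
      float, cutlass::layout::ColumnMajor,
      float, cutlass::layout::ColumnMajor,
      float, cutlass::layout::ColumnMajor>;

    int M = 1024, N = 1024, K = 4096;
    int split_k_slices = 16;
    float alpha = 1.0f, beta = 0.0f;
    // ptr_A/ptr_B/ptr_C/ptr_D are device pointers to column-major matrices with
    // leading dimensions lda/ldb/ldc/ldd (allocation not shown).

    Gemm::Arguments args(
      {M, N, K},
      {ptr_A, lda}, {ptr_B, ldb}, {ptr_C, ldc}, {ptr_D, ldd},
      {alpha, beta},
      split_k_slices);

    size_t workspace_bytes = Gemm::get_workspace_size(args);
    cutlass::device_memory::allocation<uint8_t> workspace(workspace_bytes);

    Gemm gemm_op;
    cutlass::Status status = gemm_op(args, workspace.get());  // initialize() then run()
*/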
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassSimt,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_ = arch::Sm70,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Epilogue conversion operator applied when writing partial accumulations to the workspace
typename ConvertScaledOp_ = cutlass::epilogue::thread::Convert<
ElementAccumulator_,
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementAccumulator_,
ElementAccumulator_>::EpilogueOutputOp::kCount,
ElementAccumulator_>,
/// Reduction operator
typename ReductionOp_ = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator_, typename EpilogueOutputOp_::ElementAccumulator,
EpilogueOutputOp_::kCount>,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
threadblock::GemmSplitKHorizontalThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int kAlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int kAlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator>
class GemmSplitKParallel {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ConvertScaledOp = ConvertScaledOp_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ReductionOp = ReductionOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static int const kStages = Stages;
/// GEMM kernel
using GemmKernel = typename kernel::DefaultGemmSplitKParallel<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
ConvertScaledOp,
ThreadblockSwizzle,
kStages,
Operator
>::GemmKernel;
/// Reduction kernel
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>;
//
//
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A;
TensorRef<ElementB const, LayoutB> ref_B;
TensorRef<ElementC const, LayoutC> ref_C;
TensorRef<ElementC, LayoutC> ref_D;
typename EpilogueOutputOp::Params epilogue;
int split_k_slices;
typename ConvertScaledOp::Params convert;
typename ReductionOp::Params reduction;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments() { }
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A_,
TensorRef<ElementB const, LayoutB> ref_B_,
TensorRef<ElementC const, LayoutC> ref_C_,
TensorRef<ElementC, LayoutC> ref_D_,
typename EpilogueOutputOp::Params epilogue_ =
typename EpilogueOutputOp::Params(),
int split_k_slices = 1,
typename ConvertScaledOp::Params convert_ =
typename ConvertScaledOp::Params(),
typename ReductionOp::Params reduction_ =
typename ReductionOp::Params()
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
epilogue(epilogue_),
split_k_slices(split_k_slices),
convert(convert_),
reduction(reduction_) { }
};
private:
/// Kernel parameters object
typename GemmKernel::Params gemm_params_;
/// Reduction kernel parameters object
typename ReductionKernel::Params reduction_params_;
public:
/// Constructs the GEMM.
GemmSplitKParallel() { }
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
return sizeof(ElementAccumulator_) * size_t(args.problem_size.m()) * size_t(args.problem_size.n()) * grid_shape.k();
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
// Define a reference to the workspace - this is an aligned region in device memory.
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
TensorRef<ElementAccumulator_, layout::RowMajor> ref_workspace(
static_cast<ElementAccumulator_ *>(workspace),
args.problem_size.n());
int64_t partition_stride = int64_t(args.problem_size.m()) * int64_t(args.problem_size.n());
// Initialize the Params structure
gemm_params_ = typename GemmKernel::Params{
args.problem_size,
grid_shape,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
ref_workspace,
args.convert,
partition_stride
};
reduction_params_ = typename ReductionKernel::Params(
args.problem_size.mn(),
grid_shape.k(),
partition_stride,
ref_workspace,
args.ref_D,
args.ref_C.non_const_ref(),
args.epilogue
);
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
gemm_params_.ref_A.reset(args.ref_A.data());
gemm_params_.ref_B.reset(args.ref_B.data());
gemm_params_.ref_D.reset(workspace);
reduction_params_.ref_D.reset(args.ref_D.data());
reduction_params_.ref_C.reset(args.ref_C.data());
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
//
// Launch GEMM kernel
//
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(gemm_params_.grid_tiled_shape);
dim3 block(GemmKernel::kThreadCount, 1, 1);
cudaError_t result;
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
result = cudaFuncSetAttribute(
Kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(gemm_params_);
result = cudaGetLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
//
// Launch reduction kernel
//
block = ReductionKernel::block_shape();
grid = ReductionKernel::grid_shape(gemm_params_.problem_size.mn());
Kernel<ReductionKernel><<< grid, block, 0, stream >>>(reduction_params_);
result = cudaGetLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for column-major output
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Epilogue output operator
typename EpilogueOutputOp_,
/// Epilogue conversion operator applied when writing partial accumulations to the workspace
typename ConvertScaledOp_,
/// Reduction operator
typename ReductionOp_,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_,
/// Number of stages used in the pipelined mainloop
int Stages, int kAlignmentA, int kAlignmentB,
/// Operation performed by GEMM
typename Operator_>
class GemmSplitKParallel<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_,
layout::ColumnMajor, ElementAccumulator_,
OperatorClass_, ArchTag_, ThreadblockShape_,
WarpShape_, InstructionShape_, EpilogueOutputOp_,
ConvertScaledOp_, ReductionOp_, ThreadblockSwizzle_,
Stages, kAlignmentA, kAlignmentB, Operator_> {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = layout::ColumnMajor;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using ConvertScaledOp = ConvertScaledOp_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ReductionOp = ReductionOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
static int const kStages = Stages;
using UnderlyingOperator = GemmSplitKParallel<
ElementB,
typename layout::LayoutTranspose<LayoutB>::type,
ElementA,
typename layout::LayoutTranspose<LayoutA>::type,
ElementC,
layout::RowMajor,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ConvertScaledOp,
ReductionOp,
ThreadblockSwizzle,
Stages,
kAlignmentA,
kAlignmentB,
Operator
>;
using UnderlyingArguments = typename UnderlyingOperator::Arguments;
using GemmKernel = typename UnderlyingOperator::GemmKernel;
using ReductionKernel = typename UnderlyingOperator::ReductionKernel;
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A;
TensorRef<ElementB const, LayoutB> ref_B;
TensorRef<ElementC const, LayoutC> ref_C;
TensorRef<ElementC, LayoutC> ref_D;
typename EpilogueOutputOp::Params epilogue;
int split_k_slices;
typename ConvertScaledOp::Params convert;
typename ReductionOp::Params reduction;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments() { }
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A_,
TensorRef<ElementB const, LayoutB> ref_B_,
TensorRef<ElementC const, LayoutC> ref_C_,
TensorRef<ElementC, LayoutC> ref_D_,
typename EpilogueOutputOp::Params epilogue_ =
typename EpilogueOutputOp::Params(),
int split_k_slices = 1,
typename ConvertScaledOp::Params convert_ =
typename ConvertScaledOp::Params(),
typename ReductionOp::Params reduction_ =
typename ReductionOp::Params()
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
epilogue(epilogue_),
split_k_slices(split_k_slices),
convert(convert_),
reduction(reduction_) { }
};
private:
/// Kernel parameters object
UnderlyingOperator underlying_operator_;
public:
/// Constructs the GEMM.
GemmSplitKParallel() { }
/// Helper to construct a transposed equivalent for the underlying GEMM operator
static UnderlyingArguments to_underlying_arguments(Arguments const &args) {
return UnderlyingArguments(
{args.problem_size.n(), args.problem_size.m(), args.problem_size.k()},
{args.ref_B.data(), args.ref_B.stride(0)},
{args.ref_A.data(), args.ref_A.stride(0)},
{args.ref_C.data(), args.ref_C.stride(0)},
{args.ref_D.data(), args.ref_D.stride(0)},
args.epilogue,
args.split_k_slices,
args.convert,
args.reduction
);
}
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
return UnderlyingOperator::can_implement(to_underlying_arguments(args));
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args));
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace) {
return underlying_operator_.initialize(to_underlying_arguments(args), workspace);
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
return underlying_operator_.update(to_underlying_arguments(args), workspace);
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
return underlying_operator_.run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/device/gemm_splitk_parallel.h/0 | {
"file_path": "include/cutlass/gemm/device/gemm_splitk_parallel.h",
"repo_id": "include",
"token_count": 7350
} | 30 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level SYMM/HEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/complex.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/kernel/symm_universal.h"
#include "cutlass/gemm/kernel/default_symm.h"
#include "cutlass/gemm/kernel/default_symm_complex.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by SYMM/HEMM
typename Operator,
/// Blas3 computation mode (symmetric/hermitian)
BlasMode BlasMode_ = BlasMode::kSymmetric,
///
typename Enable = void
>
struct DefaultSymmUniversal;
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Real-valued SYMM/HEMM update kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by SYMM/HEMM
typename Operator>
struct DefaultSymmUniversal<
ElementA,
LayoutA,
SideModeA,
FillModeA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
BlasMode::kSymmetric,
typename platform::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultSymmkernel = typename kernel::DefaultSymm<
ElementA,
LayoutA,
SideModeA,
FillModeA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
BlasMode::kSymmetric
>::SymmKernel;
/// Define the kernel in terms of the default kernel
using SymmKernel = kernel::SymmUniversal<
typename DefaultSymmkernel::Mma1,
typename DefaultSymmkernel::Mma2,
typename DefaultSymmkernel::Epilogue,
ThreadblockSwizzle,
SideModeA,
FillModeA
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex-valued SYMM/HEMM update kernels
//
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Side Mode for A (kLeft or kRight)
SideMode SideModeA,
/// Fill Mode for A (kLower or kUpper)
FillMode FillModeA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by SYMM/HEMM
typename Operator,
// BlasMode
BlasMode kBlasMode
>
struct DefaultSymmUniversal<
ElementA,
LayoutA,
SideModeA,
FillModeA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
SplitKSerial,
Operator,
kBlasMode,
typename platform::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type
> {
using DefaultSymmkernel = typename kernel::DefaultSymmComplex<
ElementA,
LayoutA,
SideModeA,
FillModeA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
Operator,
SplitKSerial,
kBlasMode
>::SymmKernel;
/// Define the kernel in terms of the default kernel
using SymmKernel = kernel::SymmUniversal<
typename DefaultSymmkernel::Mma1,
typename DefaultSymmkernel::Mma2,
typename DefaultSymmkernel::Epilogue,
ThreadblockSwizzle,
SideModeA,
FillModeA
>;
};
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/default_symm_universal.h/0 | {
"file_path": "include/cutlass/gemm/kernel/default_symm_universal.h",
"repo_id": "include",
"token_count": 3292
} | 31 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel-level sparse GEMM with a universal (batched and split-K capable) argument interface.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/arch.h"
#include "cutlass/fast_math.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/params_universal_base.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
namespace detail {
template <
typename LayoutA,
typename LayoutB,
typename LayoutC,
typename LayoutE
>
struct SparseUniversalArgumentsBase : UniversalArgumentsBase {
//
// Data members
//
void const * ptr_A;
void const * ptr_B;
void const * ptr_C;
void * ptr_D;
void const * ptr_E;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_E;
typename LayoutA::Stride::LongIndex lda;
typename LayoutB::Stride::LongIndex ldb;
typename LayoutC::Stride::LongIndex ldc;
typename LayoutC::Stride::LongIndex ldd;
typename LayoutE::Stride::LongIndex lde;
//
// Methods
//
SparseUniversalArgumentsBase():
ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr), ptr_E(nullptr)
{}
/// constructs an arguments structure
SparseUniversalArgumentsBase(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
void const * ptr_E,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
int64_t batch_stride_E,
typename LayoutA::Stride::LongIndex lda,
typename LayoutB::Stride::LongIndex ldb,
typename LayoutC::Stride::LongIndex ldc,
typename LayoutC::Stride::LongIndex ldd,
typename LayoutC::Stride::LongIndex lde)
:
UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D),
ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), ptr_E(ptr_E),
batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C),
batch_stride_E(batch_stride_E),
lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), lde(lde)
{
CUTLASS_TRACE_HOST("SparseUniversalArgumentsBase::Arguments() - problem_size: " << problem_size);
}
};
template <
typename Mma,
typename Epilogue,
typename Arguments,
typename ThreadblockSwizzle,
typename ThreadblockShape,
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutA,
typename LayoutB
>
struct SparseUniversalParamsBase : UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB> {
using ParamsBase = UniversalParamsBase<
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
//
// Data members
//
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Mma::IteratorE::Params params_E;
void * ptr_A;
void * ptr_B;
void * ptr_C;
void * ptr_D;
void * ptr_E;
int64_t batch_stride_A;
int64_t batch_stride_B;
int64_t batch_stride_C;
int64_t batch_stride_E;
//
// Host dispatch API
//
/// Default constructor
SparseUniversalParamsBase() = default;
/// Constructor
SparseUniversalParamsBase(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
params_A(args.lda),
params_B(args.ldb),
params_C(args.ldc),
params_D(args.ldd),
params_E(args.lde),
ptr_A(const_cast<void *>(args.ptr_A)),
ptr_B(const_cast<void *>(args.ptr_B)),
ptr_C(const_cast<void *>(args.ptr_C)),
ptr_D(args.ptr_D),
ptr_E(const_cast<void *>(args.ptr_E)),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
batch_stride_C(args.batch_stride_C),
batch_stride_E(args.batch_stride_E)
{}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
CUTLASS_TRACE_HOST("SparseUniversalParamsBase::update()");
// Update input/output pointers
this->ptr_A = const_cast<void *>(args.ptr_A);
this->ptr_B = const_cast<void *>(args.ptr_B);
this->ptr_C = const_cast<void *>(args.ptr_C);
this->ptr_D = args.ptr_D;
this->ptr_E = const_cast<void *>(args.ptr_E);
this->batch_stride_A = args.batch_stride_A;
this->batch_stride_B = args.batch_stride_B;
this->batch_stride_C = args.batch_stride_C;
this->batch_stride_D = args.batch_stride_D;
this->batch_stride_E = args.batch_stride_E;
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
class GemmSparseUniversal {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static int const kSparse = Mma::kSparse;
static int const kMetaSizeInBits = Mma::kMetaSizeInBits;
static int const kMaxID2 = Mma::kMaxID2;
static int const kElementsPerElementE = Mma::kElementsPerElementE;
using ElementE = typename Mma::ElementE;
using LayoutE = typename Mma::LayoutE;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename Epilogue::OutputTileIterator::Layout;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value);
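// For example (illustrative arithmetic only): with ElementA = half_t and ElementB = half_t
// this evaluates to const_max(128 / 16, 128 / 16) = 8 elements, while ElementA = float with
// ElementB = half_t gives const_max(4, 8) = 8.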
//
// Structures
//
/// Argument structure
struct Arguments : detail::SparseUniversalArgumentsBase<
LayoutA,
LayoutB,
LayoutC,
LayoutE
> {
using Base = detail::SparseUniversalArgumentsBase<
LayoutA,
LayoutB,
LayoutC,
LayoutE
>;
typename EpilogueOutputOp::Params epilogue;
Arguments() {}
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode,
GemmCoord problem_size,
int batch_count,
typename EpilogueOutputOp::Params epilogue,
void const * ptr_A,
void const * ptr_B,
void const * ptr_C,
void * ptr_D,
void const * ptr_E,
int64_t batch_stride_A,
int64_t batch_stride_B,
int64_t batch_stride_C,
int64_t batch_stride_D,
int64_t batch_stride_E,
typename LayoutA::Stride::LongIndex lda,
typename LayoutB::Stride::LongIndex ldb,
typename LayoutC::Stride::LongIndex ldc,
typename LayoutC::Stride::LongIndex ldd,
typename LayoutC::Stride::LongIndex lde)
:
Base(
mode, problem_size, batch_count,
ptr_A, ptr_B, ptr_C, ptr_D, ptr_E,
batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D, batch_stride_E,
lda, ldb, ldc, ldd, lde
),
epilogue(epilogue)
{
CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << problem_size);
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params : detail::SparseUniversalParamsBase<
Mma,
Epilogue,
Arguments,
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>
{
using ParamsBase = detail::SparseUniversalParamsBase<
Mma,
Epilogue,
Arguments,
ThreadblockSwizzle,
ThreadblockShape,
ElementA,
ElementB,
ElementC,
LayoutA,
LayoutB>;
typename EpilogueOutputOp::Params output_op;
//
// Host dispatch API
//
/// Default constructor
Params() = default;
/// Constructor
Params(
Arguments const &args, /// GEMM application arguments
int device_sms, /// Number of SMs on the device
int sm_occupancy) /// Kernel SM occupancy (in thread blocks)
:
ParamsBase(args, device_sms, sm_occupancy),
output_op(args.epilogue)
{}
/// Lightweight update given a subset of arguments.
void update(Arguments const &args)
{
CUTLASS_TRACE_HOST("GemmUniversal::Params::update()");
// Update input/output pointers
this->ptr_A = const_cast<void *>(args.ptr_A);
this->ptr_B = const_cast<void *>(args.ptr_B);
this->ptr_C = const_cast<void *>(args.ptr_C);
this->ptr_D = args.ptr_D;
this->ptr_E = const_cast<void *>(args.ptr_E);
this->batch_stride_A = args.batch_stride_A;
this->batch_stride_B = args.batch_stride_B;
this->batch_stride_C = args.batch_stride_C;
this->batch_stride_D = args.batch_stride_D;
this->batch_stride_E = args.batch_stride_E;
output_op = args.epilogue;
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
public:
//
// Host dispatch API
//
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
GemmUniversalMode mode,
int split_k_count)
{
CUTLASS_TRACE_HOST("GemmUniversal::can_implement()");
static int const kAlignmentA = (cute::is_same<LayoutA,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (cute::is_same<LayoutA,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = (cute::is_same<LayoutB,
layout::RowMajorInterleaved<32>>::value)
? 32
: (cute::is_same<LayoutB,
layout::RowMajorInterleaved<64>>::value)
? 64
: Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = (cute::is_same<LayoutC,
layout::ColumnMajorInterleaved<32>>::value)
? 32
: (cute::is_same<LayoutC,
layout::ColumnMajorInterleaved<64>>::value)
? 64
: Epilogue::OutputTileIterator::kElementsPerAccess;
static int const kAlignmentE = Mma::IteratorE::AccessType::kElements;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
bool isEMisaligned = false;
if (cute::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = (problem_size.k() / kSparse) % kAlignmentA;
} else if (cute::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (cute::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| cute::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = (problem_size.k() / kSparse) % kAlignmentA;
}
if (cute::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (cute::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = (problem_size.k() / kSparse) % kAlignmentB;
} else if (cute::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| cute::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = (problem_size.k() / kSparse) % kAlignmentB;
}
if (cute::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (cute::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (cute::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| cute::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
isEMisaligned = (problem_size.m() % kAlignmentE)
|| ((problem_size.k() / kSparse) % kAlignmentE);
// The k dimension has to be a multiple of the threadblock K because
// out-of-bound metadata would be initialized to 0 by cp.async.zfill,
// but 0 is not valid metadata.
if (problem_size.k() % Mma::Shape::kK) {
isEMisaligned = true;
}
if (mode == GemmUniversalMode::kGemm
|| mode == GemmUniversalMode::kGemmSplitKParallel) {
if ((problem_size.k() / split_k_count) % Mma::Shape::kK) {
isEMisaligned = true;
}
}
// M dimension has to be multiple of 32 (sparse float) or 16 (sparse int)
// because of the row reordering of operand E
static int const kAlignmentM = (sizeof(ElementE) == 2) ? 32 : 16;
if (problem_size.m() % kAlignmentM) {
isEMisaligned = true;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
if (isEMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for E operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size, args.mode, args.batch_count);
}
public:
//
// Device-only API
//
// Factory invocation
CUTLASS_DEVICE
static void invoke(
Params const ¶ms,
SharedStorage &shared_storage)
{
GemmSparseUniversal op;
op(params, shared_storage);
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
ThreadblockSwizzle threadblock_swizzle;
run_with_swizzle(params, shared_storage, threadblock_swizzle);
}
/// Executes one GEMM with an externally-provided swizzling function
CUTLASS_DEVICE
void run_with_swizzle(Params const ¶ms, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) {
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
ElementE *ptr_E = static_cast<ElementE *>(params.ptr_E);
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
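// Illustrative example: with gemm_k_size = 256, the threadblock at k index 1
// starts at offset_k = 256 and, if it is not the final split, covers the K
// range [256, 512).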
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A / kSparse;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
ptr_E += threadblock_tile_offset.k() * params.batch_stride_E / kSparse;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
ptr_E = static_cast<ElementE * const *>(params.ptr_E)[threadblock_tile_offset.k()];
}
__syncthreads();
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k / kSparse,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
cutlass::MatrixCoord tb_offset_E{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k / kSparse / kElementsPerElementE,
};
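// Illustrative example (values depend on the Mma configuration): with a 2:4
// sparse kernel where kSparse = 2 and kElementsPerElementE = 4, an offset_k of
// 64 maps to column 32 of the compressed A operand and column 8 of the
// metadata tensor E.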
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k / kSparse},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
typename Mma::IteratorE iterator_E(
params.params_E,
ptr_E,
{params.problem_size.m(), problem_size_k / kSparse / kElementsPerElementE},
thread_idx,
tb_offset_E);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = canonical_warp_idx_sync();
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
iterator_E,
accumulators);
//
// Epilogue
//
EpilogueOutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C);
ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D);
//
// Fetch pointers based on mode.
//
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
if (params.mode == GemmUniversalMode::kGemm) {
// If performing a reduction via split-K, fetch the initial synchronization
if (params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
}
else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) {
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_C += threadblock_tile_offset.k() * params.batch_stride_C;
ptr_D += threadblock_tile_offset.k() * params.batch_stride_D;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()];
ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()];
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
ptr_C,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
ptr_D,
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
}
// Execute the epilogue operator to update the destination tensor.
epilogue(
output_op,
iterator_D,
accumulators,
iterator_C);
//
// Release the semaphore
//
if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/gemm_sparse_universal.h/0 | {
"file_path": "include/cutlass/gemm/kernel/gemm_sparse_universal.h",
"repo_id": "include",
"token_count": 10226
} | 32 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Base scheduler for grouped problems
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumerated type describing the type of scheduling to perform for the ProblemVisitor
enum class GroupScheduleMode {
// Perform all scheduling on device
kDeviceOnly,
// Precompute on the host the full sequence of problems to access
kHostPrecompute
};
/// Visitor class to abstract away the algorithm for iterating over tiles
template <typename ProblemSizeHelper,
typename ThreadblockShape_>
struct BaseGroupedProblemVisitor {
using ThreadblockShape = ThreadblockShape_;
struct ProblemInfo {
static int32_t const kNoPrefetchEntry = -1;
int32_t problem_idx;
int32_t problem_start;
CUTLASS_DEVICE
ProblemInfo() : problem_idx(kNoPrefetchEntry), problem_start(kNoPrefetchEntry) {}
CUTLASS_DEVICE
ProblemInfo(int32_t problem_idx_, int32_t problem_start_) :
problem_idx(problem_idx_), problem_start(problem_start_) {}
};
struct Params {
cutlass::gemm::GemmCoord const *problem_sizes;
int32_t problem_count;
void const *workspace;
int32_t tile_count;
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
Params(): problem_sizes(nullptr), problem_count(0), workspace(nullptr), tile_count(0) { }
/// Ctor
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const *problem_sizes,
int32_t problem_count,
void const *workspace = nullptr,
int32_t tile_count = 0
):
problem_sizes(problem_sizes),
problem_count(problem_count),
workspace(workspace),
tile_count(tile_count)
{}
};
Params params;
int32_t tile_idx;
int32_t problem_tile_start;
int32_t problem_idx;
//
// Methods
//
CUTLASS_DEVICE
BaseGroupedProblemVisitor(
Params const ¶ms_,
int32_t block_idx
):
params(params_),
tile_idx(block_idx),
problem_tile_start(0),
problem_idx(0)
{}
/// Get the grid shape
CUTLASS_HOST_DEVICE
static cutlass::gemm::GemmCoord grid_shape(const cutlass::gemm::GemmCoord& problem) {
return ProblemSizeHelper::grid_shape(problem);
}
/// Gets the global tile index
CUTLASS_HOST_DEVICE
int32_t tile_index() const {
return tile_idx;
}
/// Gets the index of the problem
CUTLASS_HOST_DEVICE
int32_t problem_index() const {
return problem_idx;
}
CUTLASS_HOST_DEVICE
int32_t threadblock_idx() const {
return tile_idx - problem_tile_start;
}
CUTLASS_DEVICE
void advance(int32_t grid_size) {
tile_idx += grid_size;
}
CUTLASS_HOST_DEVICE
static void possibly_transpose_problem(cutlass::gemm::GemmCoord& problem) {
ProblemSizeHelper::possibly_transpose_problem(problem);
}
/// Returns the problem size for the current problem
CUTLASS_HOST_DEVICE
cutlass::gemm::GemmCoord problem_size() const {
GemmCoord problem = params.problem_sizes[problem_idx];
ProblemSizeHelper::possibly_transpose_problem(problem);
return problem;
}
CUTLASS_HOST_DEVICE
static int32_t tile_count(const cutlass::gemm::GemmCoord& grid) {
return ProblemSizeHelper::tile_count(grid);
}
static int32_t group_tile_count(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr, int32_t problem_count) {
int32_t total_tiles = 0;
for (int32_t i = 0; i < problem_count; ++i) {
auto problem = host_problem_sizes_ptr[i];
possibly_transpose_problem(problem);
auto grid = grid_shape(problem);
total_tiles += tile_count(grid);
}
return total_tiles;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ProblemSizeHelper,
typename ThreadblockShape,
GroupScheduleMode GroupScheduleMode_,
int PrefetchTileCount,
int ThreadCount
>
struct GroupedProblemVisitor;
/////////////////////////////////////////////////////////////////////////////////////////////////
// ProblemVisitor that performs all scheduling on device
//
template <typename ProblemSizeHelper,
typename ThreadblockShape,
int PrefetchTileCount,
int ThreadCount>
struct GroupedProblemVisitor<ProblemSizeHelper,
ThreadblockShape,
GroupScheduleMode::kDeviceOnly,
PrefetchTileCount,
ThreadCount>: public BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape> {
using Base = BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape>;
using Params = typename Base::Params;
static int const kThreadCount = ThreadCount;
static bool const kRequiresPrecomputation = false;
static int const kThreadsPerWarp = 32;
struct SharedStorage {};
// Final tile of the problem loaded by this thread. Each thread will hold
// a separate value.
int32_t problem_ending_tile;
SharedStorage &shared_storage;
//
// Methods
//
CUTLASS_DEVICE
GroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base(params_, block_idx),
problem_ending_tile(0),
shared_storage(shared_storage_)
{
this->problem_idx = -1 * kThreadsPerWarp;
this->problem_tile_start = 0;
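// Starting problem_idx at -kThreadsPerWarp ensures that the first call to
// next_tile() advances the warp to fetch the first group of problems
// [0, kThreadsPerWarp).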
}
CUTLASS_DEVICE
bool next_tile() {
// Check whether the tile to compute is within the range of the current problem.
int32_t problem_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, this->problem_idx % kThreadsPerWarp);
if (this->tile_idx < problem_tile_end) {
return true;
}
// Check whether the tile to compute is within the current group of problems fetched by the warp.
// The last tile for this group is the final tile of the problem held by the final thread in the warp.
int32_t group_tile_end = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1);
// Keep the starting problem for this group in `problem_idx`. This is done to reduce
// register pressure. The starting problem for this group is simply the first problem
// in the group most recently fetched by the warp.
int32_t &group_problem_start = this->problem_idx;
group_problem_start = (this->problem_idx / kThreadsPerWarp) * kThreadsPerWarp;
// Keep the starting tile for this group in `problem_tile_start`. This is done to reduce
// register pressure.
int32_t &group_tile_start = this->problem_tile_start;
// Each thread in the warp examines a separate problem. The warp advances in
// groups of problems until reaching a group whose ending tile is greater
// than tile_idx.
while (group_tile_end <= this->tile_idx) {
group_problem_start += kThreadsPerWarp;
if (group_problem_start > this->params.problem_count) {
return false;
}
// Since `group_tile_start` is a reference to `this->problem_tile_start`, this
// also sets `this->problem_tile_start`. The fact that `this->problem_tile_start`
// is also set here is used later in `next_tile`.
group_tile_start = group_tile_end;
int lane_idx = threadIdx.x % kThreadsPerWarp;
int32_t lane_problem = group_problem_start + lane_idx;
// Compute the number of tiles in the problem assigned to each thread.
problem_ending_tile = 0;
if (lane_problem < this->params.problem_count) {
cutlass::gemm::GemmCoord problem = this->params.problem_sizes[lane_problem];
this->possibly_transpose_problem(problem);
cutlass::gemm::GemmCoord grid = this->grid_shape(problem);
problem_ending_tile = this->tile_count(grid);
}
// Compute a warp-wide inclusive prefix sum to compute the ending tile index of
// each thread's problem.
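// Illustrative example: if lanes 0..3 hold per-problem tile counts
// {4, 2, 7, 1}, after the scan they hold inclusive sums {4, 6, 13, 14}.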
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < kThreadsPerWarp; i <<= 1) {
int32_t val = __shfl_up_sync(0xffffffff, problem_ending_tile, i);
if (lane_idx >= i) {
problem_ending_tile += val;
}
}
// The total tile count for this group is now in the final position of the prefix sum
int32_t tiles_in_group = __shfl_sync(0xffffffff, problem_ending_tile, kThreadsPerWarp-1);
problem_ending_tile += group_tile_start;
group_tile_end += tiles_in_group;
}
// The next problem to process is the first one whose ending tile position
// is greater than the tile index.
int32_t problem_idx_in_group =
__popc(__ballot_sync(0xffffffff, problem_ending_tile <= this->tile_idx));
this->problem_idx = group_problem_start + problem_idx_in_group;
// The starting tile for this problem is the ending tile of the previous problem. In cases
// where `problem_idx_in_group` is the first problem in the group, we do not need to reset
// `problem_tile_start`, because it is set to the previous group's ending tile in the while
// loop above.
if (problem_idx_in_group > 0) {
this->problem_tile_start = __shfl_sync(0xffffffff, problem_ending_tile, problem_idx_in_group - 1);
}
return true;
}
static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count) {
return 0;
}
static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count,
void* host_workspace_ptr) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Precomputes schedule on host and prefetches into shared memory
//
template <typename ProblemSizeHelper,
typename ThreadblockShape,
int PrefetchTileCount,
int ThreadCount>
struct GroupedProblemVisitor<ProblemSizeHelper,
ThreadblockShape,
GroupScheduleMode::kHostPrecompute,
PrefetchTileCount,
ThreadCount> : public BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape> {
static_assert(PrefetchTileCount > 0,
"GroupedProblemVisitor with GroupScheduleMode `kHostPrecompute` currently requires prefetching to shared memory");
using Base = BaseGroupedProblemVisitor<ProblemSizeHelper, ThreadblockShape>;
using Params = typename Base::Params;
using ProblemInfo = typename Base::ProblemInfo;
static bool const kRequiresPrecomputation = true;
static int const kPrefetchTileCount = PrefetchTileCount;
static int const kThreadCount = ThreadCount;
struct SharedStorage {
// Sequence of problem IDs and starting tiles to compute
cutlass::Array<ProblemInfo, kPrefetchTileCount> prefetched_problems;
};
int32_t tiles_computed;
int32_t iterations_per_block;
int32_t block_load_start;
SharedStorage &shared_storage;
ProblemInfo const *problem_info_ptr;
//
// Methods
//
CUTLASS_DEVICE
GroupedProblemVisitor(
Params const ¶ms_,
SharedStorage &shared_storage_,
int32_t block_idx
): Base(params_, block_idx),
tiles_computed(0),
shared_storage(shared_storage_),
problem_info_ptr(reinterpret_cast<ProblemInfo const*>(params_.workspace))
{
iterations_per_block = (params_.tile_count - 1 + gridDim.x) / gridDim.x;
block_load_start = iterations_per_block * block_idx;
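// Illustrative example: with tile_count = 100 and gridDim.x = 8, each block
// owns ceil(100 / 8) = 13 consecutive workspace entries, so block 2 loads
// starting at entry 26.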
// Start prefetching the first set of tiles to compute
prefetch_tiles();
}
CUTLASS_DEVICE
bool next_tile() {
if (this->tile_idx >= this->params.tile_count) {
return false;
}
int32_t prefetch_idx = (tiles_computed % kPrefetchTileCount);
if (prefetch_idx == 0) {
// Ensure all previous stores to shared memory have been completed
__syncthreads();
}
auto problem_info = shared_storage.prefetched_problems[prefetch_idx];
++tiles_computed;
if ((tiles_computed % kPrefetchTileCount) == 0) {
// Begin prefetching next set of tiles. Synchronize first to ensure that
// we don't overwrite the current buffer while someone else is using it.
__syncthreads();
prefetch_tiles();
}
this->problem_idx = problem_info.problem_idx;
this->problem_tile_start = problem_info.problem_start;
return true;
}
static size_t get_workspace_size(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count) {
int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count);
int32_t entries_per_block = ((total_tiles - 1 + block_count) / block_count);
return sizeof(ProblemInfo) * entries_per_block * block_count;
}
#if !defined(__CUDACC_RTC__)
static void host_precompute(const cutlass::gemm::GemmCoord* host_problem_sizes_ptr,
int32_t problem_count,
int32_t block_count,
void* host_workspace_ptr) {
ProblemInfo* host_problem_info_ptr = reinterpret_cast<ProblemInfo*>(host_workspace_ptr);
int32_t total_tiles = Base::group_tile_count(host_problem_sizes_ptr, problem_count);
int32_t entries_per_block = (total_tiles - 1 + block_count) / block_count;
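// Tiles are laid out round-robin across blocks: global tile t is written to
// the region of block (t % block_count) at slot (t / block_count), matching
// the per-block consumption order used by prefetch_tiles(). For example, with
// block_count = 4, tiles 0, 4, 8, ... fill slots 0, 1, 2, ... of block 0.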
int tile = 0;
int start_tile = 0;
for (int p_idx = 0; p_idx < problem_count; ++p_idx) {
auto problem = host_problem_sizes_ptr[p_idx];
Base::possibly_transpose_problem(problem);
auto grid = Base::grid_shape(problem);
int tiles = Base::tile_count(grid);
ProblemInfo problem_info(p_idx, start_tile);
for (int i = 0; i < tiles; ++i, ++tile) {
host_problem_info_ptr[(entries_per_block * (tile % block_count)) + (tile / block_count)] = problem_info;
}
start_tile += tiles;
}
}
#endif
private:
CUTLASS_DEVICE
void prefetch_tiles() {
CUTLASS_PRAGMA_UNROLL
for (int32_t i = 0; i < kPrefetchTileCount; i += kThreadCount) {
int32_t offset = threadIdx.x + i;
if (offset < kPrefetchTileCount && (tiles_computed + offset < iterations_per_block)) {
shared_storage.prefetched_problems[offset] = problem_info_ptr[block_load_start + tiles_computed + offset];
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/kernel/grouped_problem_visitor.h/0 | {
"file_path": "include/cutlass/gemm/kernel/grouped_problem_visitor.h",
"repo_id": "include",
"token_count": 6168
} | 33 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Default template for a Blocked-Ell MMA.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/arch/arch.h"
#include "cutlass/arch/wmma.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/gemm/threadblock/default_mma_core_wmma.h"
#endif //CUTLASS_ARCH_WMMA_ENABLED
#include "cutlass/gemm/threadblock/ell_mma_pipelined.h"
#include "cutlass/gemm/threadblock/ell_mma_multistage.h"
#include "cutlass/transform/threadblock/ell_predicated_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Operator class tag
typename OperatorClass_,
/// Tag indicating architecture to tune for
typename ArchTag_,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false
>
struct DefaultEllMma;
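// Illustrative usage (a sketch, not part of this header): selecting the
// threadblock-scoped Blocked-Ell MMA for an SM80 fp16 TensorOp configuration.
// The shapes, alignments, and stage count below are assumptions and must
// correspond to one of the specializations defined in this file:
//
//   using Mma = typename cutlass::gemm::threadblock::DefaultEllMma<
//       cutlass::half_t, cutlass::layout::RowMajor, 8,        // A
//       cutlass::half_t, cutlass::layout::ColumnMajor, 8,     // B
//       float, cutlass::layout::RowMajor,                     // accumulator, C layout
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 32>,               // threadblock tile
//       cutlass::gemm::GemmShape<64, 64, 32>,                 // warp tile
//       cutlass::gemm::GemmShape<16, 8, 16>,                  // instruction shape
//       3, cutlass::arch::OpMultiplyAdd>::ThreadblockMma;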
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass Simt)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
arch::OpClassSimt, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator
>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
template <
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator
>
struct DefaultEllMma<float, LayoutA, kAlignmentA, float, LayoutB,
kAlignmentB, float, layout::RowMajor,
arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, float, LayoutA, float,
LayoutB, float, layout::RowMajor, arch::OpClassTensorOp, 2,
arch::OpMultiplyAddFastF16>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
float, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
float, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, float,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator,
/// Number of Interleaved K
int InterleavedK>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass,
ArchTag, ThreadblockShape, WarpShape, InstructionShape, 2,
Operator, true> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, 2, Operator,
true>;
static_assert(kAlignmentA == 128 / sizeof_bits<ElementA>::value,
"Alignment must match thread data map's vector length");
static_assert(kAlignmentB == 128 / sizeof_bits<ElementB>::value,
"Alignment must match thread data map's vector length");
// Define iterators over tiles from the A operand
using IteratorA = cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>, ElementA,
LayoutA, 1, typename MmaCore::IteratorThreadMapA>;
// Define iterators over tiles from the B operand
using IteratorB = cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>, ElementB,
LayoutB, 0, typename MmaCore::IteratorThreadMapB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>,
typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
/// Operation performed by GEMM
typename Operator
>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassSimt, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, Stages, Operator, false> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, Operator>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor,
typename MmaCore::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for row-major output (OperatorClass TensorOp)
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
/// Operation performed by GEMM
typename Operator
>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, layout::RowMajor,
arch::OpClassTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, Stages, Operator, false> {
static cutlass::arch::CacheOperation::Kind const CacheOpA =
((sizeof_bits<ElementA>::value * kAlignmentA) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * kAlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
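// cp.async with the cache-global policy requires full 128-bit transfers;
// narrower accesses fall back to the cache-always policy.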
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, Operator, false, CacheOpA, CacheOpB>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor,
typename MmaCore::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for column-major-interleaved output
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the multistage mainloop
int Stages,
/// Operation performed by GEMM
typename Operator,
/// Number of Interleaved K
int InterleavedK>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass,
ArchTag, ThreadblockShape, WarpShape, InstructionShape,
Stages, Operator, true> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator,
layout::ColumnMajorInterleaved<InterleavedK>, OperatorClass, Stages,
Operator, true>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::Array<ElementA, kAlignmentA>;
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::Array<ElementB, kAlignmentB>;
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileAccessIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaMultistage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
MmaCore::kCacheOpA, IteratorB, typename MmaCore::SmemIteratorB,
MmaCore::kCacheOpB, ElementAccumulator, layout::RowMajor,
typename MmaCore::MmaPolicy, Stages>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for SIMT IDP4A Kernels
template <
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Operation performed by GEMM
typename Operator,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape>
struct DefaultEllMma<int8_t, LayoutA, kAlignmentA, int8_t, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
ArchTag, ThreadblockShape, WarpShape, GemmShape<1, 1, 4>, 2,
Operator, false> {
using InstructionShape = GemmShape<1, 1, 4>;
using ElementA = int8_t;
using ElementB = int8_t;
using OperatorClass = arch::OpClassSimt;
static const bool transposeA = cutlass::platform::is_same< LayoutA, layout::ColumnMajor >::value;
static const bool transposeB = cutlass::platform::is_same< LayoutB, layout::RowMajor >::value;
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, layout::RowMajor,
OperatorClass, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, transposeB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
layout::RowMajor, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
/// Specialization for Wmma TensorOp operator with 2 staged pipeline
template <
///< Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 2, Operator, false> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, 2, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped pipelined matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::EllMmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
LayoutC, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization for Wmma TensorOp operator with 1 staged pipeline
template <
///< Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Operation performed by GEMM
typename Operator>
struct DefaultEllMma<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB,
kAlignmentB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, ArchTag, ThreadblockShape, WarpShape,
InstructionShape, 1, Operator, false> {
// Define the MmaCore components
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
ElementB, LayoutB, ElementAccumulator, LayoutC,
arch::OpClassWmmaTensorOp, 1, Operator>;
// Define iterators over tiles from the A operand
using IteratorA =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kM, MmaCore::Shape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, kAlignmentA>;
// Define iterators over tiles from the B operand
using IteratorB =
cutlass::transform::threadblock::EllPredicatedTileIterator<
cutlass::MatrixShape<MmaCore::Shape::kK, MmaCore::Shape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, kAlignmentB>;
// Define the threadblock-scoped singlestage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaSingleStage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementAccumulator,
LayoutC, typename MmaCore::MmaPolicy>;
};
////////////////////////////////////////////////////////////////////////////////
#endif //CUTLASS_ARCH_WMMA_ENABLED
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/threadblock/default_ell_mma.h/0 | {
"file_path": "include/cutlass/gemm/threadblock/default_ell_mma.h",
"repo_id": "include",
"token_count": 10635
} | 34 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level per-channel layernorm normalization
(variance/mean) and gamma/beta scale+bias applied before matrix
multiply-accumulate operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename FragmentActivations, typename FragmentVarMean, typename FragmentGammaBeta>
struct LayernormScaleBiasTransform {
using T = typename FragmentActivations::Element;
static int const NumActivations = FragmentActivations::kElements;
static int const NumVarMean = FragmentVarMean::kElements;
static int const NumGammaBeta = FragmentGammaBeta::kElements;
static int const MmaElements = 2;
// One element has one scale and one bias
static int const MmaScaleBiasPair = 2;
// 16816 has 2 columns and 2 rows
static int const MmaCols = 2;
static int const MmaRows = 2;
using MmaOperand = Array<T, MmaElements>;
using VarMeanOperand = Array<__half2, MmaScaleBiasPair>;
using GammaBetaOperand = Array<T, MmaElements * MmaScaleBiasPair>;
CUTLASS_DEVICE
void transform(MmaOperand &activations,
VarMeanOperand const &var_mean,
GammaBetaOperand const &gamma_beta) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
uint32_t *ptr_activations = reinterpret_cast<uint32_t *>(&activations);
uint32_t const *ptr_var_mean = reinterpret_cast<uint32_t const *>(&var_mean);
uint32_t const *ptr_gamma_beta = reinterpret_cast<uint32_t const *>(&gamma_beta);
    // Apply the per-channel scale+bias+relu if the data is not a special NaN
    // (0x7eff). If it is a special NaN (0x7eff), hard-code the output to 0.
    // We assume each pair of FP16 values is either both in-bounds or both
    // out-of-bounds, which requires C to be an even number.
asm volatile(
"{\n\t"
" fma.rn.f16x2 %0, %1, %2, %3;\n"
" fma.rn.f16x2 %0, %4, %0, %5;\n"
"}\n"
: "=r"(ptr_activations[0])
: "r"(ptr_var_mean[0]), "r"(ptr_activations[0]),
"r"(ptr_var_mean[1]),
"r"(ptr_gamma_beta[0]), "r"(ptr_gamma_beta[1]));
#else
assert(0);
#endif
}
CUTLASS_DEVICE
void operator()(FragmentActivations &activations,
FragmentVarMean const &var_mean,
FragmentGammaBeta const &gamma_beta) {
MmaOperand *ptr_activations = reinterpret_cast<MmaOperand *>(&activations);
VarMeanOperand const *ptr_var_mean =
reinterpret_cast<VarMeanOperand const *>(&var_mean);
GammaBetaOperand const *ptr_gamma_beta =
reinterpret_cast<GammaBetaOperand const *>(&gamma_beta);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < (NumActivations / MmaElements); ++i) {
transform(ptr_activations[i],
ptr_var_mean[i / (MmaCols * MmaRows) * MmaRows + i % MmaRows],
ptr_gamma_beta[(i / MmaScaleBiasPair) % MmaCols]);
}
}
};
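// Illustrative sketch (not part of the original CUTLASS header): the two packed
// f16x2 FMAs in transform() compute, per FP16 lane,
//   y = gamma_beta[0] * (var_mean[0] * x + var_mean[1]) + gamma_beta[1]
// i.e. a scale/shift followed by a per-channel affine transform. The function
// name and parameter names below are hypothetical; this is only a scalar float
// reference, assuming the caller has already packed {scale, shift} into the
// var_mean operand and {gamma, beta} into the gamma_beta operand.
CUTLASS_HOST_DEVICE
float layernorm_scale_bias_reference(float x,
                                     float scale, float shift,
                                     float gamma, float beta) {
  float normalized = scale * x + shift;   // mirrors the first f16x2 FMA (var/mean operand)
  return gamma * normalized + beta;       // mirrors the second f16x2 FMA (gamma/beta operand)
}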
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/gemm/warp/layernorm_scale_bias_transform.h/0 | {
"file_path": "include/cutlass/gemm/warp/layernorm_scale_bias_transform.h",
"repo_id": "include",
"token_count": 1939
} | 35 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines a class for using integer types smaller than one byte in host or
device code.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cstdint>
#else
#include <cstdint>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_size.h"
#include "cutlass/platform/platform.h"
namespace cutlass {
template <int Bits, bool Signed = true>
struct integer_subbyte {
using Storage = uint8_t;
static_assert(Bits <= 8*sizeof(Storage), "Require a subbyte of bits in integer_subbyte");
// "External type"; the integer type for which
// integer_subbyte has a conversion-to operator
using xint_t = typename cutlass::platform::conditional<Signed, int, unsigned>::type;
// Bitmask for truncation from larger integers
static constexpr Storage bits_mask_ = Storage(Storage(-1) >> (8 - Bits));
// Bitmask for the sign bit
static constexpr Storage sign_mask_ = Storage((Signed ? 1 : 0) << (Bits - 1));
// Where the bits are stored
Storage storage;
// Default construction does NOT zero-initialize
integer_subbyte() = default;
// Implicit conversion is DEPRECATED.
// Please use one of the two explicit constructors below.
template<class T,
class Enable = cutlass::platform::enable_if_t<cutlass::platform::is_convertible_v<T, int>>
>
[[deprecated("Implicit conversion is deprecated; please use explicit construction instead")]]
CUTLASS_HOST_DEVICE
integer_subbyte(T value)
: integer_subbyte(static_cast<xint_t>(value)) {}
// CUTLASS code commonly converts both signed and unsigned integers
// into integer_subbyte, so the class provides both explicit
// conversions.
// Precondition: If the external type is unsigned int, then value
// fits in unsigned int (is nonnegative).
CUTLASS_HOST_DEVICE explicit
integer_subbyte(int value)
: storage(reinterpret_cast<Storage const&>(value) & bits_mask_)
{
if constexpr (Signed) {
[[maybe_unused]] constexpr int lower_bound = -(1 << (Bits - 1));
[[maybe_unused]] constexpr int upper_bound = (1 << (Bits - 1)) - 1;
assert(value >= lower_bound);
      assert(value <= upper_bound);  // upper_bound is the largest representable value, inclusive
}
else {
[[maybe_unused]] constexpr unsigned upper_bound = 1u << Bits;
assert(value >= 0);
assert(value < static_cast<int>(upper_bound));
}
}
// Precondition: If the external type is (signed) int, then value
// fits in int.
CUTLASS_HOST_DEVICE explicit
integer_subbyte(unsigned value)
: storage(reinterpret_cast<Storage const&>(value) & bits_mask_)
{
    if constexpr (Signed) {
      // Compare against the signed interpretation; the precondition guarantees
      // that value fits in int, and comparing the unsigned value directly
      // against a negative lower bound would be a signed/unsigned mismatch.
      [[maybe_unused]] constexpr int lower_bound = -(1 << (Bits - 1));
      [[maybe_unused]] constexpr int upper_bound = (1 << (Bits - 1)) - 1;
      [[maybe_unused]] int const signed_value = static_cast<int>(value);
      assert(signed_value >= lower_bound);
      assert(signed_value <= upper_bound);
    }
else {
[[maybe_unused]] constexpr unsigned upper_bound = 1u << Bits;
assert(value < upper_bound);
}
}
// Convert to the "external" integer type (int or unsigned)
CUTLASS_HOST_DEVICE
operator xint_t() const {
if (sign_mask_ & storage) { // Sign extend
return xint_t(storage) | ~xint_t(bits_mask_);
} else {
return xint_t(storage);
}
}
CUTLASS_HOST_DEVICE
bool operator==(integer_subbyte const& rhs) const {
return storage == rhs.storage;
}
CUTLASS_HOST_DEVICE
bool operator!=(integer_subbyte const& rhs) const {
return storage != rhs.storage;
}
CUTLASS_HOST_DEVICE
bool operator<(integer_subbyte const& rhs) const {
if ((sign_mask_ & storage) == (sign_mask_ & rhs.storage)) {
// If both *this and rhs have the same sign, compare storage directly.
return storage < rhs.storage;
}
else {
// If *this and rhs don't have the same sign,
// then return whether *this is negative.
return sign_mask_ & storage;
}
}
CUTLASS_HOST_DEVICE
bool operator<=(integer_subbyte const& rhs) const {
if ((sign_mask_ & storage) == (sign_mask_ & rhs.storage)) {
// If both *this and rhs have the same sign, compare storage directly.
return storage <= rhs.storage;
}
else {
// If *this and rhs don't have the same sign,
// then return whether *this is negative.
return sign_mask_ & storage;
}
}
CUTLASS_HOST_DEVICE
bool operator>=(integer_subbyte const& rhs) const {
return !(*this < rhs);
}
CUTLASS_HOST_DEVICE
bool operator>(integer_subbyte const& rhs) const {
return !(*this <= rhs);
}
CUTLASS_HOST_DEVICE friend integer_subbyte
conj(integer_subbyte const& x) {
return x;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// 1-bit Unsigned integer type
using uint1b_t = integer_subbyte<1, false>;
/// 2-bit Integer type
using int2b_t = integer_subbyte<2, true>;
/// 2-bit Unsigned integer type
using uint2b_t = integer_subbyte<2, false>;
/// 4-bit Integer type
using int4b_t = integer_subbyte<4, true>;
/// 4-bit Unsigned integer type
using uint4b_t = integer_subbyte<4, false>;
/// 1-bit binary type
using bin1_t = bool;
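// Illustrative sketch (not part of the original CUTLASS header): explicit
// construction keeps only the low `Bits` bits, and converting back to the
// external type sign-extends when the type is signed. On a little-endian
// target the snippet below behaves as commented; it is a usage sketch only
// and is not compiled.
#if 0
cutlass::int4b_t a(-3);                       // storage holds 0b1101
int a_back = static_cast<int>(a);             // sign-extends back to -3
cutlass::uint4b_t b(11u);                     // storage holds 0b1011
unsigned b_back = static_cast<unsigned>(b);   // 11
bool is_negative = a < cutlass::int4b_t(0);   // true: comparisons respect the sign bit
#endif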
///////////////////////////////////////////////////////////////////////////////////////////////////
template <int Bits, bool Signed>
struct sizeof_bits<integer_subbyte<Bits,Signed>> {
static constexpr int value = Bits;
};
/// Defines the size of an element in bits - specialized for bin1_t
template <>
struct sizeof_bits<bin1_t> {
static constexpr int value = 1;
};
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace platform {
/// Forward Declaration
template <class T>
struct numeric_limits;
// Specialization for signed integer_subbyte
template<int NumBits>
struct numeric_limits<cutlass::integer_subbyte<NumBits, true>> {
private:
using value_type = cutlass::integer_subbyte<NumBits, true>;
public:
CUTLASS_HOST_DEVICE static value_type lowest() noexcept {
return value_type{
-(1 << (NumBits - 1))
};
}
CUTLASS_HOST_DEVICE static value_type max() noexcept {
return value_type{
(1 << (NumBits - 1)) - 1
};
}
CUTLASS_HOST_DEVICE static value_type const min() noexcept {
return lowest();
}
static constexpr bool is_integer = true;
static constexpr bool is_signed = true;
static constexpr bool has_infinity = false;
};
// Specialization for unsigned integer_subbyte
template<int NumBits>
struct numeric_limits<cutlass::integer_subbyte<NumBits, false>> {
private:
using value_type = cutlass::integer_subbyte<NumBits, false>;
public:
CUTLASS_HOST_DEVICE static value_type lowest() noexcept {
return value_type{0u};
}
CUTLASS_HOST_DEVICE static value_type max() noexcept {
return value_type{
(1u << NumBits) - 1u
};
}
CUTLASS_HOST_DEVICE static value_type const min() noexcept {
return lowest();
}
static constexpr bool is_integer = true;
static constexpr bool is_signed = false;
};
} // namespace platform
} // namespace cutlass
| include/cutlass/integer_subbyte.h/0 | {
"file_path": "include/cutlass/integer_subbyte.h",
"repo_id": "include",
"token_count": 2879
} | 36 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Boost-like numeric conversion operator for CUTLASS numeric types
*/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <cfenv>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/thread/unary_op.h"
#include "cutlass/array.h"
#include "cutlass/half.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Floating-point rounding style similar to the Standard Library's rounding modes, but supporting
/// additional rounding options.
enum class FloatRoundStyle {
round_indeterminate, ///< rounding mode unknown
round_toward_zero, ///< round toward zero
round_to_nearest, ///< round to nearest even
round_to_nearest_satfinite, ///< round to nearest even, capping value to min and max of destination type
round_toward_infinity, ///< round toward infinity
round_toward_neg_infinity, ///< round toward negative infinity
round_half_ulp_truncate, ///< add 0.5ulp to integer representation then round toward zero
round_half_ulp_trunc_dntz ///< like round_half_ulp_truncate, except denorms are rounded *toward* zero
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename T,
typename S,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
struct NumericConverter {
using result_type = T;
using source_type = S;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return static_cast<result_type>(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
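// Illustrative sketch (not part of the original CUTLASS header): the primary
// template is a plain static_cast; the partial specializations that follow
// override it to honor the requested rounding mode (and, on device, to map to
// a single cvt instruction). Usage sketch only, not compiled:
#if 0
cutlass::NumericConverter<int32_t, float> to_int;      // round_to_nearest by default
int32_t a = to_int(3.4f);                               // handled by the float -> int32_t specializations below
cutlass::NumericConverter<int32_t, float,
    cutlass::FloatRoundStyle::round_toward_zero> to_int_rz;
int32_t b = to_int_rz(3.9f);                            // truncates toward zero -> 3
#endif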
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float => int32_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDA_ARCH__)
template <>
struct NumericConverter<int32_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = int32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
return __float2int_rn(s);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<int32_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = int32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
return __float2int_rz(s);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#elif !defined(__CUDACC_RTC__)
template <>
struct NumericConverter<int32_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = int32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
static result_type convert(source_type const & s) {
std::fesetround(FE_TONEAREST);
return (result_type)std::nearbyint(s);
}
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<int32_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = int32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
static result_type convert(source_type const & s) {
std::fesetround(FE_TOWARDZERO);
return (result_type)std::nearbyint(s);
}
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float => int8_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDA_ARCH__)
template <>
struct NumericConverter<int8_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = int8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
int32_t intermediate;
asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(intermediate) : "f"(s));
return static_cast<result_type>(intermediate);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<int8_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = int8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
int32_t intermediate;
asm volatile("cvt.rzi.sat.s8.f32 %0, %1;" : "=r"(intermediate) : "f"(s));
return static_cast<result_type>(intermediate);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<uint8_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = uint8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
int32_t intermediate;
asm volatile("cvt.rni.sat.u8.f32 %0, %1;" : "=r"(intermediate) : "f"(s));
return static_cast<result_type>(intermediate);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<uint8_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = uint8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
int32_t intermediate;
asm volatile("cvt.rzi.sat.u8.f32 %0, %1;" : "=r"(intermediate) : "f"(s));
return static_cast<result_type>(intermediate);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#elif !defined(__CUDACC_RTC__)
template <>
struct NumericConverter<int8_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = int8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
static result_type convert(source_type const & s) {
std::fesetround(FE_TONEAREST);
int32_t intermediate = (int32_t)std::nearbyint(s);
// Low-end saturation
intermediate = std::max(intermediate, (int32_t)std::numeric_limits<int8_t>::lowest());
// High-end saturation
intermediate = std::min(intermediate, (int32_t)std::numeric_limits<int8_t>::max());
return static_cast<result_type>(intermediate);
}
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<int8_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = int8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
static result_type convert(source_type const & s) {
std::fesetround(FE_TOWARDZERO);
int32_t intermediate = (int32_t)std::nearbyint(s);
// Low-end saturation
intermediate = std::max(intermediate, (int32_t)std::numeric_limits<int8_t>::lowest());
// High-end saturation
intermediate = std::min(intermediate, (int32_t)std::numeric_limits<int8_t>::max());
return static_cast<result_type>(intermediate);
}
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<uint8_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = uint8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
static result_type convert(source_type const & s) {
std::fesetround(FE_TONEAREST);
int32_t intermediate = (int32_t)std::nearbyint(s);
// Low-end saturation
intermediate = std::max(intermediate, (int32_t)std::numeric_limits<uint8_t>::lowest());
// High-end saturation
intermediate = std::min(intermediate, (int32_t)std::numeric_limits<uint8_t>::max());
return static_cast<result_type>(intermediate);
}
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<uint8_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = uint8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
static result_type convert(source_type const & s) {
std::fesetround(FE_TOWARDZERO);
int32_t intermediate = (int32_t)std::nearbyint(s);
// Low-end saturation
intermediate = std::max(intermediate, (int32_t)std::numeric_limits<uint8_t>::lowest());
// High-end saturation
intermediate = std::min(intermediate, (int32_t)std::numeric_limits<uint8_t>::max());
return static_cast<result_type>(intermediate);
}
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#endif
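// Illustrative sketch (not part of the original CUTLASS header): both the PTX
// path (cvt.*.sat.s8.f32) and the host fallback above saturate to the
// destination range rather than wrapping. Usage sketch only, not compiled:
#if 0
cutlass::NumericConverter<int8_t, float> to_s8;
int8_t hi = to_s8(1000.0f);    // saturates to 127
int8_t lo = to_s8(-1000.0f);   // saturates to -128
cutlass::NumericConverter<uint8_t, float> to_u8;
uint8_t z = to_u8(-5.0f);      // saturates to 0
#endif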
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float => integer_subbyte
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template<int Bits, FloatRoundStyle Round>
struct NumericConverter<integer_subbyte<Bits, /* Signed = */ true>, float, Round> {
private:
static constexpr bool result_is_signed = true;
public:
using result_type = integer_subbyte<Bits, result_is_signed>;
using source_type = float;
static constexpr FloatRoundStyle round_style = Round;
CUTLASS_HOST_DEVICE static result_type
convert(source_type const& src) {
using middle_type = int;
static_assert(8 * sizeof(middle_type) > Bits, "This conversion "
"requires that integer_subbyte have fewer representation bits "
"than the number of bits in int.");
auto middle = NumericConverter<middle_type, source_type, Round>::convert(src);
return NumericConverter<result_type, middle_type, Round>::convert(middle);
}
CUTLASS_HOST_DEVICE result_type
operator()(source_type const& s) const {
return convert(s);
}
};
template<int Bits, FloatRoundStyle Round>
struct NumericConverter<integer_subbyte<Bits, /* Signed = */ false>, float, Round> {
private:
static constexpr bool result_is_signed = false;
public:
using result_type = integer_subbyte<Bits, result_is_signed>;
using source_type = float;
static constexpr FloatRoundStyle round_style = Round;
CUTLASS_HOST_DEVICE static result_type
convert(source_type const& src) {
using middle_type = unsigned;
static_assert(8 * sizeof(middle_type) > Bits, "This conversion "
"requires that integer_subbyte have fewer representation bits "
"than the number of bits in unsigned int.");
auto middle = NumericConverter<middle_type, source_type, Round>::convert(src);
return NumericConverter<result_type, middle_type, Round>::convert(middle);
}
CUTLASS_HOST_DEVICE result_type
operator()(source_type const& s) const {
return convert(s);
}
};
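// Illustrative sketch (not part of the original CUTLASS header): the subbyte
// specializations above convert float to the "middle" int or unsigned type
// first, then narrow the result into the subbyte storage. Usage sketch only,
// not compiled:
#if 0
cutlass::NumericConverter<cutlass::int4b_t, float> to_s4;
cutlass::int4b_t x = to_s4(2.6f);   // rounds to 3 through int, then packs into 4 bits
cutlass::NumericConverter<cutlass::uint4b_t, float> to_u4;
cutlass::uint4b_t y = to_u4(6.2f);  // becomes 6 through the unsigned middle type
#endif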
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for identity conversion (T <= T)
template <typename T, FloatRoundStyle Round>
struct NumericConverter<T, T, Round> {
using result_type = T;
using source_type = T;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return s;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float <=> cutlass::half_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float <= cutlass::half_t
template <FloatRoundStyle Round>
struct NumericConverter<float, cutlass::half_t, Round> {
using result_type = float;
using source_type = cutlass::half_t;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
result_type result = static_cast<float>(s);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Specialization for round-to-nearest
template <>
struct NumericConverter<cutlass::half_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = cutlass::half_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
result_type result = static_cast<cutlass::half_t>(s);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Specialization for round-toward-zero
template <>
struct NumericConverter<cutlass::half_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = cutlass::half_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
/// Round toward zero
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & flt) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return cutlass::half_t(__float2half_rz(flt));
#else
    // software implementation truncates the mantissa (rounds toward zero)
unsigned const& s = reinterpret_cast<unsigned const &>(flt);
uint16_t sign = uint16_t((s >> 16) & 0x8000);
int32_t exp = int32_t((s >> 23) & 0xff) - 127;
int mantissa = s & 0x7fffff;
uint16_t u = 0;
if ((s & 0x7fffffff) == 0) {
// sign-preserving zero
return cutlass::half_t::bitcast(sign);
}
if (exp > 15) {
if (exp == 128 && mantissa) {
// not a number
u = 0x7fff;
} else {
// overflow to infinity
u = sign | 0x7c00;
}
return cutlass::half_t::bitcast(u);
}
if (exp >= -14) {
// normal fp32 to normal fp16
u = uint16_t((uint32_t(exp + 15) & 0x1f) << 10);
u = uint16_t(u | (mantissa >> 13));
} else {
// normal single-precision to subnormal cutlass::half_t-precision representation
int rshift = (-14 - exp);
if (rshift < 32) {
mantissa |= (1 << 23);
mantissa = (mantissa >> rshift);
u = (uint16_t(mantissa >> 13) & 0x3ff);
} else {
mantissa = 0;
u = 0;
}
}
u |= sign;
return cutlass::half_t::bitcast(u);
#endif // defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float <=> cutlass::bfloat16_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float <= cutlass::bfloat16_t
template <FloatRoundStyle Round>
struct NumericConverter<float, cutlass::bfloat16_t, Round> {
using result_type = float;
using source_type = cutlass::bfloat16_t;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return static_cast<float>(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<cutlass::bfloat16_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = cutlass::bfloat16_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return static_cast<cutlass::bfloat16_t>(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<cutlass::bfloat16_t, float, FloatRoundStyle::round_half_ulp_truncate> {
using result_type = cutlass::bfloat16_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_half_ulp_truncate;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
uint32_t x32 = reinterpret_cast<uint32_t const &>(s);
#if defined(__CUDA_ARCH__)
if (::isfinite(s)) {
x32 += 0x8000;
}
#else
if (std::isfinite(s)) {
x32 += 0x8000;
}
#endif
uint16_t x16 = uint16_t((x32 >> 16) & 0xffff);
return cutlass::bfloat16_t::bitcast(x16);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<cutlass::bfloat16_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = cutlass::bfloat16_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
uint32_t x32 = reinterpret_cast<uint32_t const &>(s);
uint16_t x16 = uint16_t(x32 >> 16);
return cutlass::bfloat16_t::bitcast(x16);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
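// Illustrative worked example (not part of the original CUTLASS header) showing
// how the two truncating bfloat16 converters above differ on the same input.
// Take s = 1.00390625f = 1 + 2^-8, whose bit pattern is 0x3F808000:
//   * round_toward_zero keeps the upper 16 bits:   0x3F80 -> 1.0
//   * round_half_ulp_truncate adds 0x8000 first:   0x3F810000 -> 0x3F81 -> 1.0078125
// Usage sketch only, not compiled:
#if 0
float s = 1.00390625f;
cutlass::NumericConverter<cutlass::bfloat16_t, float,
    cutlass::FloatRoundStyle::round_toward_zero> rz;
cutlass::NumericConverter<cutlass::bfloat16_t, float,
    cutlass::FloatRoundStyle::round_half_ulp_truncate> rhut;
cutlass::bfloat16_t a = rz(s);     // 1.0
cutlass::bfloat16_t b = rhut(s);   // 1.0078125
#endif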
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float <=> cutlass::tfloat32_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float <= cutlass::tfloat32_t
template <FloatRoundStyle Round>
struct NumericConverter<float, cutlass::tfloat32_t, Round> {
using result_type = float;
using source_type = cutlass::tfloat32_t;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return static_cast<float>(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<cutlass::tfloat32_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = cutlass::tfloat32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
unsigned storage = reinterpret_cast<unsigned const &>(s);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 900
asm volatile("cvt.rn.tf32.f32 %0, %1;" : "=r"(storage) : "r"(storage));
#else
if ((storage & 0x7f800000) != 0x7f800000) {
bool mantissa_bit = ((storage & (1 << 13)) != 0);
bool round_bit = ((storage & (1 << 12)) != 0);
bool sticky_bit = ((storage & ((1 << 12) - 1)) != 0);
if ((round_bit && sticky_bit) || (round_bit && mantissa_bit)) {
storage += uint32_t(1 << 13);
}
// Note, the following is intentionally commented out. TF32
// does not define the low order bits, so they may be left in
// an undefined state.
//
      // By not truncating these bits explicitly, we avoid an extra logical
// operation.
//
// TF32 may be implicitly converted to float by performing this
// operation as needed.
//
// storage = (storage & ~0x1fff);
}
else if (storage & ~0xff800000) {
storage = 0x7fffffff;
}
#endif
return cutlass::tfloat32_t::bitcast(storage);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<cutlass::tfloat32_t, float, FloatRoundStyle::round_half_ulp_truncate> {
using result_type = cutlass::tfloat32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_half_ulp_truncate;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return cutlass::tfloat32_t::round_half_ulp_truncate(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// This rounding operation is similar to half_ulp_truncate except it rounds denorms toward zero.
/// It avoids predicated code, though it requires a temporary register.
template <>
struct NumericConverter<cutlass::tfloat32_t, float, FloatRoundStyle::round_half_ulp_trunc_dntz> {
using result_type = cutlass::tfloat32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_half_ulp_trunc_dntz;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
unsigned y = reinterpret_cast<unsigned const &>(s);
y = y & 0xff800000;
float d = reinterpret_cast<float const &>(y);
float z = d / float(1 << 11) + s;
return reinterpret_cast<result_type const &>(z);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <>
struct NumericConverter<cutlass::tfloat32_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = cutlass::tfloat32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
uint32_t x = reinterpret_cast<uint32_t const &>(s);
return cutlass::tfloat32_t::bitcast(x & 0xffffe000);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Conversion operator for float to cutlass::tfloat32_t big and small values
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
FloatRoundStyle RoundBig = FloatRoundStyle::round_toward_zero,
FloatRoundStyle RoundSmall = FloatRoundStyle::round_half_ulp_truncate
>
struct NumericConverterFastF32 {
// result_type holds big cutlass::tfloat32_t at idx(0) and small cutlass::tfloat32_t at idx(1)
using result_type = Array<cutlass::tfloat32_t, 2>;
// source data type
using source_type = float;
// rounding styles for big and small part
static FloatRoundStyle const kRoundBig = RoundBig;
static FloatRoundStyle const kRoundSmall = RoundSmall;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
result_type result;
NumericConverter<cutlass::tfloat32_t, float, kRoundBig> convert_big_;
NumericConverter<cutlass::tfloat32_t, float, kRoundSmall> convert_small_;
// convert and fill cutlass::tfloat32_t big at idx 0
result[0] = convert_big_(source);
// convert and fill cutlass::tfloat32_t small at idx 1
result[1] = convert_small_(source - static_cast<float>(result[0]));
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
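// Illustrative sketch (not part of the original CUTLASS header): the big/small
// split is the building block for "fast accurate" FP32 emulation on tensor
// cores. The big part holds the upper mantissa bits as a tfloat32_t and the
// small part holds (source - big), so big + small recovers most of the
// original FP32 mantissa. Usage sketch only, not compiled:
#if 0
cutlass::NumericConverterFastF32<> split;
cutlass::Array<cutlass::tfloat32_t, 2> parts = split(1.2345678f);
float reconstructed = static_cast<float>(parts[0]) + static_cast<float>(parts[1]);
// reconstructed is much closer to 1.2345678f than static_cast<float>(parts[0]) alone
#endif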
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Conversion and Clamp operator for Integers
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename T,
typename S
>
struct NumericConverterClamp {
using result_type = T;
using source_type = S;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
NumericConverter<result_type, source_type> convert_op;
result_type const kClamp_max = cutlass::platform::numeric_limits<result_type>::max();
result_type const kClamp_min = cutlass::platform::numeric_limits<result_type>::lowest();
if (s < (source_type)kClamp_min)
return kClamp_min;
if (s > (source_type)kClamp_max)
return kClamp_max;
return convert_op(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
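// Illustrative sketch (not part of the original CUTLASS header): unlike the
// plain NumericConverter, the clamping variant first bounds the source value
// by the destination type's representable range. Usage sketch only, not
// compiled:
#if 0
cutlass::NumericConverterClamp<int8_t, int32_t> clamp_s8;
int8_t a = clamp_s8(300);    // 127  (clamped to the int8_t maximum)
int8_t b = clamp_s8(-300);   // -128 (clamped to the int8_t lowest)
int8_t c = clamp_s8(42);     // 42   (in range, converted directly)
#endif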
// This converter is needed to enable cutlass::half_t output types when using int32_t accumulators.
// Since floating-point types do not require a clamp, this converter simply casts from
// the source type to cutlass::half_t.
template <
typename S
>
struct NumericConverterClamp<cutlass::half_t, S> {
using result_type = cutlass::half_t;
using source_type = S;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const &source) {
return static_cast<cutlass::half_t>(source);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Conversion operator for Array
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Conversion operator for Array
template <
typename T,
typename S,
int N,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
typename Transform = cutlass::transform::thread::UnaryTransform::Identity
>
struct NumericArrayConverter {
using result_type = Array<T, N>;
using source_type = Array<S, N>;
static FloatRoundStyle const round_style = Round;
static_assert(platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value ||
platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
result_type result;
NumericConverter<T, S, Round> convert_;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
if (platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value) {
result[i] = convert_(s[i]);
} else { // conjugate
result[i] = conj(convert_(s[i]));
}
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
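// Illustrative sketch (not part of the original CUTLASS header): the array
// converter applies the scalar converter elementwise; the specializations that
// follow replace the loop with packed conversions where the hardware provides
// them. Usage sketch only, not compiled:
#if 0
cutlass::Array<float, 4> src;
src[0] = 0.5f; src[1] = 1.5f; src[2] = -2.25f; src[3] = 3.0f;
cutlass::NumericArrayConverter<cutlass::half_t, float, 4> convert_array;
cutlass::Array<cutlass::half_t, 4> dst = convert_array(src);
#endif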
template <
typename T,
int N,
FloatRoundStyle Round,
typename Transform
>
struct NumericArrayConverter<T, T, N, Round, Transform> {
using result_type = Array<T, N>;
using source_type = Array<T, N>;
static FloatRoundStyle const round_style = Round;
static_assert(platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value ||
platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
CUTLASS_HOST_DEVICE
static result_type convert(source_type const &source) {
if (platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value) {
return source;
} else {
result_type result;
for (int i = 0; i < N; ++i) {
result[i] = conj(static_cast<typename source_type::Element>(source[i]));
}
return result;
}
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<half, 2> <= Array<float, 2>, round to nearest
template <>
struct NumericArrayConverter<cutlass::half_t, float, 2, FloatRoundStyle::round_to_nearest> {
using result_type = Array<cutlass::half_t, 2>;
using source_type = Array<float, 2>;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
Array<cutlass::half_t, 2> result;
reinterpret_cast<__half2 &>(result) = __float22half2_rn(reinterpret_cast<float2 const &>(source));
return result;
#else
NumericConverter<cutlass::half_t, float, round_style> convert_;
      // NOTE: cutlass::Array<half, N> is NOT an aggregate type, so the `{}`
      // below does NOT perform zero initialization; it performs default
      // initialization (calling the default constructor). We use this syntax
      // to resolve a compiler warning about an uninitialized member variable.
Array<cutlass::half_t, 2> result{};
result[0] = convert_(source[0]);
result[1] = convert_(source[1]);
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float, 2> <= Array<cutlass::half_t, 2>, round to nearest
template <FloatRoundStyle Round>
struct NumericArrayConverter<float, cutlass::half_t, 2, Round> {
using result_type = Array<float, 2>;
using source_type = Array<cutlass::half_t, 2>;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
float2 result2 = __half22float2(reinterpret_cast<__half2 const &>(source));
return {
float{result2.x},
float{result2.y}
};
#else
NumericConverter<float, cutlass::half_t, round_style> convert_;
return {
convert_(source[0]),
convert_(source[1])
};
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<half> <= Array<float>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<cutlass::half_t, float, N, Round> {
using result_type = Array<cutlass::half_t, N>;
using source_type = Array<float, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<cutlass::half_t, float, 2, Round> convert_vector_;
NumericConverter<cutlass::half_t, float, Round> convert_element_;
result_type result;
Array<cutlass::half_t, 2> *result_ptr = reinterpret_cast<Array<cutlass::half_t, 2> *>(&result);
Array<float, 2> const *source_ptr = reinterpret_cast<Array<float, 2> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
if (N % 2) {
result[N - 1] = convert_element_(source[N - 1]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float> <= Array<cutlass::half_t>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float, cutlass::half_t, N, Round> {
using result_type = Array<float, N>;
using source_type = Array<cutlass::half_t, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<float, cutlass::half_t, 2, Round> convert_vector_;
NumericConverter<float, cutlass::half_t, Round> convert_element_;
result_type result;
Array<float, 2> *result_ptr = reinterpret_cast<Array<float, 2> *>(&result);
Array<cutlass::half_t, 2> const *source_ptr = reinterpret_cast<Array<cutlass::half_t, 2> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
if (N % 2) {
result[N - 1] = convert_element_(source[N - 1]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::bfloat16_t, 2> <= Array<float, 2>, round to nearest
template <>
struct NumericArrayConverter<cutlass::bfloat16_t, float, 2, FloatRoundStyle::round_to_nearest> {
using result_type = Array<cutlass::bfloat16_t, 2>;
using source_type = Array<float, 2>;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned d;
asm("cvt.rn.bf16x2.f32 %0, %1, %2;\n" : "=r"(d) : "f"(source[1]), "f"(source[0]) );
return reinterpret_cast<result_type const &>(d);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<cutlass::bfloat16_t> <= Array<float>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<cutlass::bfloat16_t, float, N, Round> {
using result_type = Array<cutlass::bfloat16_t, N>;
using source_type = Array<float, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<cutlass::bfloat16_t, float, 2, Round> convert_vector_;
NumericConverter<cutlass::bfloat16_t, float, Round> convert_element_;
result_type result;
Array<cutlass::bfloat16_t, 2> *result_ptr = reinterpret_cast<Array<cutlass::bfloat16_t, 2> *>(&result);
Array<float, 2> const *source_ptr = reinterpret_cast<Array<float, 2> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
if (N % 2) {
result[N - 1] = convert_element_(source[N - 1]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#endif // if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conditional guards to enable partial specialization for packed integers
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && \
((__CUDACC_VER_MAJOR__ > 10) || \
((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2)))
/// Partial specialization for Array<int8_t, 1> <= Array<int, 1>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, int, 1, Round> {
using result_type = Array<int8_t, 1>;
using source_type = Array<int, 1>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericConverter<int8_t, int, Round> convert_element_;
result_type result;
result[0] = convert_element_(source[0]);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<int8_t, 2> <= Array<int, 2>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, int, 2, Round> {
using result_type = Array<int8_t, 2>;
using source_type = Array<int, 2>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
uint32_t tmp;
asm volatile(
"cvt.pack.sat.s8.s32.b32 %0, %2, %1, 0;\n"
: "=r"(tmp) : "r"(source[0]), "r"(source[1]));
uint16_t out = (tmp & 0xffff);
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<int8_t, 4> <= Array<int, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, int, 4, Round> {
using result_type = Array<int8_t, 4>;
using source_type = Array<int, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned out;
asm volatile(
"{ .reg .u32 r4;"
"cvt.pack.sat.s8.s32.b32 r4, %4, %3, 0;"
"cvt.pack.sat.s8.s32.b32 %0, %2, %1, r4;"
"}"
: "=r"(out) : "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]));
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<int8_t> <= Array<int>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, int, N, Round> {
static_assert(!(N % 4), "N must be multiple of 4.");
using result_type = Array<int8_t, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<int8_t, int, 4, Round> convert_vector_;
result_type result;
Array<int8_t, 4> *result_ptr = reinterpret_cast<Array<int8_t, 4> *>(&result);
Array<int, 4> const *source_ptr = reinterpret_cast<Array<int, 4> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 4; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
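// Illustrative sketch (not part of the original CUTLASS header): for N a
// multiple of 4, the converter above emits a pair of cvt.pack.sat.s8.s32
// instructions per four elements, producing one packed 32-bit result per
// group. Usage sketch only, not compiled:
#if 0
cutlass::Array<int, 8> acc;   // e.g. int32 accumulators from an IMMA tile
for (int i = 0; i < 8; ++i) {
  acc[i] = 100 * i - 300;
}
cutlass::NumericArrayConverter<int8_t, int, 8> quantize;
cutlass::Array<int8_t, 8> packed = quantize(acc);   // each element saturated to [-128, 127]
#endif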
/// Partial specialization for Array<uint8_t, 1> <= Array<int, 1>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, int, 1, Round> {
using result_type = Array<uint8_t, 1>;
using source_type = Array<int, 1>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericConverter<uint8_t, int, Round> convert_element_;
result_type result;
result[0] = convert_element_(source[0]);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<uint8_t, 2> <= Array<int, 2>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, int, 2, Round> {
using result_type = Array<uint8_t, 2>;
using source_type = Array<int, 2>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
uint32_t tmp;
asm volatile(
"cvt.pack.sat.u8.s32.b32 %0, %2, %1, 0;\n"
: "=r"(tmp) : "r"(source[0]), "r"(source[1]));
uint16_t out = (tmp & 0xffff);
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<uint8_t, 4> <= Array<int, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, int, 4, Round> {
using result_type = Array<uint8_t, 4>;
using source_type = Array<int, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned out;
asm volatile(
"{ .reg .u32 r4;"
"cvt.pack.sat.u8.s32.b32 r4, %4, %3, 0;"
"cvt.pack.sat.u8.s32.b32 %0, %2, %1, r4;"
"}"
: "=r"(out) : "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]));
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<uint8_t> <= Array<int>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, int, N, Round> {
static_assert(!(N % 4), "N must be multiple of 4.");
using result_type = Array<uint8_t, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<uint8_t, int, 4, Round> convert_vector_;
result_type result;
Array<uint8_t, 4> *result_ptr = reinterpret_cast<Array<uint8_t, 4> *>(&result);
Array<int, 4> const *source_ptr = reinterpret_cast<Array<int, 4> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 4; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<float, N> <=> Array<float_e4m3_t, N>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<float, 2> <= Array<float_e4m3_t, 2>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float, cutlass::float_e4m3_t, 2, Round> {
using result_element = float;
using source_element = cutlass::float_e4m3_t;
using result_type = Array<result_element, 2>;
using source_type = Array<source_element, 2>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out_fp16;
uint16_t const& src_packed = reinterpret_cast<uint16_t const&>(source);
asm volatile( \
"{\n" \
"cvt.rn.f16x2.e4m3x2 %0, %1;\n" \
"}\n" : "=r"(out_fp16): "h"(src_packed));
float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16));
result_type out;
out[0] = res0.x;
out[1] = res0.y;
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e4m3_t, 2> <= Array<float, 2>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, float, 2, Round> {
using result_element = cutlass::float_e4m3_t;
using source_element = float;
using result_type = Array<result_element, 2>;
using source_type = Array<source_element, 2>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t out;
asm volatile( \
"{\n" \
"cvt.rn.satfinite.e4m3x2.f32 %0, %2, %1;\n" \
"}" \
: "=h"(out) : "f"(source[0]), "f"(source[1]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
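// Illustrative sketch (not part of the original CUTLASS header): the paired
// FP8 converters above map to cvt.rn.satfinite.e4m3x2.f32 / cvt.rn.f16x2.e4m3x2
// when the PTX path is available, so out-of-range floats saturate to the
// finite e4m3 maximum (448) rather than becoming infinity. Usage sketch only,
// not compiled:
#if 0
cutlass::Array<float, 2> src;
src[0] = 1.5f;
src[1] = 1.0e6f;   // far outside the e4m3 range
cutlass::NumericArrayConverter<cutlass::float_e4m3_t, float, 2> to_fp8;
cutlass::Array<cutlass::float_e4m3_t, 2> q = to_fp8(src);   // q[1] saturates to 448
cutlass::NumericArrayConverter<float, cutlass::float_e4m3_t, 2> from_fp8;
cutlass::Array<float, 2> back = from_fp8(q);
#endif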
/// Partial specialization for Array<float, 2> <= Array<float_e5m2_t, 2>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float, cutlass::float_e5m2_t, 2, Round> {
using result_element = float;
using source_element = cutlass::float_e5m2_t;
using result_type = Array<result_element, 2>;
using source_type = Array<source_element, 2>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out_fp16;
uint16_t const& src_packed = reinterpret_cast<uint16_t const&>(source);
asm volatile( \
"{\n" \
"cvt.rn.f16x2.e5m2x2 %0, %1;\n" \
"}\n" : "=r"(out_fp16): "h"(src_packed));
float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16));
result_type out;
out[0] = res0.x;
out[1] = res0.y;
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 2; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
namespace detail {
/// Special converters that can be used with 4 8-bit elements packed in a register.
/// Common use is for fast FP8 converters.
template <
typename T,
typename S,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
typename Transform = cutlass::transform::thread::UnaryTransform::Identity
>
struct NumericArrayConverterPacked4Element {
using result_type = Array<T, 4>;
using source_type = Array<S, 4>;
static FloatRoundStyle const round_style = Round;
static_assert(platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value ||
platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
result_type result;
NumericConverter<T, S, Round> convert_;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
if (platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value) {
result[i] = convert_(s[i]);
}
else { // conjugate
result[i] = conj(convert_(s[i]));
}
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float, 4> <= Array<float_e4m3_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float, cutlass::float_e4m3_t, Round> {
using result_element = float;
using source_element = cutlass::float_e4m3_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out_fp16[2];
uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source);
asm volatile( \
"{\n" \
".reg .b16 lo, hi;\n" \
"mov.b32 {lo, hi}, %2;\n" \
"cvt.rn.f16x2.e4m3x2 %0, lo;\n" \
"cvt.rn.f16x2.e4m3x2 %1, hi;\n" \
"}\n" : "=r"(out_fp16[0]), "=r"(out_fp16[1]) : "r"(src_packed));
float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[0]));
float2 res1 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[1]));
result_type out;
out[0] = res0.x;
out[1] = res0.y;
out[2] = res1.x;
out[3] = res1.y;
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e4m3_t, 4> <= Array<float, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e4m3_t, float, Round> {
using result_element = cutlass::float_e4m3_t;
using source_element = float;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out;
asm volatile( \
"{\n" \
".reg .b16 lo;\n" \
".reg .b16 hi;\n" \
"cvt.rn.satfinite.e4m3x2.f32 lo, %2, %1;\n" \
"cvt.rn.satfinite.e4m3x2.f32 hi, %4, %3;\n" \
"mov.b32 %0, {lo, hi};\n" \
"}" \
: "=r"(out) : "f"(source[0]), "f"(source[1]), "f"(source[2]), "f"(source[3]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<float, 4> <=> Array<float_e5m2_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<float, 4> <= Array<float_e5m2_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float, cutlass::float_e5m2_t, Round> {
using result_element = float;
using source_element = cutlass::float_e5m2_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out_fp16[2];
uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source);
asm volatile( \
"{\n" \
".reg .b16 lo, hi;\n" \
"mov.b32 {lo, hi}, %2;\n" \
"cvt.rn.f16x2.e5m2x2 %0, lo;\n" \
"cvt.rn.f16x2.e5m2x2 %1, hi;\n" \
"}\n" : "=r"(out_fp16[0]), "=r"(out_fp16[1]) : "r"(src_packed));
float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[0]));
float2 res1 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[1]));
result_type out;
out[0] = res0.x;
out[1] = res0.y;
out[2] = res1.x;
out[3] = res1.y;
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e5m2_t, 4> <= Array<float, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e5m2_t, float, Round> {
using result_element = cutlass::float_e5m2_t;
using source_element = float;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out;
asm volatile( \
"{\n" \
".reg .b16 lo;\n" \
".reg .b16 hi;\n" \
"cvt.rn.satfinite.e5m2x2.f32 lo, %2, %1;\n" \
"cvt.rn.satfinite.e5m2x2.f32 hi, %4, %3;\n" \
"mov.b32 %0, {lo, hi};\n" \
"}" \
: "=r"(out) : "f"(source[0]), "f"(source[1]), "f"(source[2]), "f"(source[3]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<cutlass::half_t, 4> <=> Array<float_e4m3_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::half_t, 4> <= Array<float_e4m3_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<cutlass::half_t, cutlass::float_e4m3_t, Round> {
using result_element = cutlass::half_t;
using source_element = cutlass::float_e4m3_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out[2];
uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source);
asm volatile( \
"{\n" \
".reg .b16 lo, hi;\n" \
"mov.b32 {lo, hi}, %2;\n" \
"cvt.rn.f16x2.e4m3x2 %0, lo;\n" \
"cvt.rn.f16x2.e4m3x2 %1, hi;\n" \
"}\n" : "=r"(out[0]), "=r"(out[1]) : "r"(src_packed));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e4m3_t, 4> <= Array<cutlass::half_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e4m3_t, cutlass::half_t, Round> {
using result_element = cutlass::float_e4m3_t;
using source_element = cutlass::half_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out;
uint32_t const* src_packed = reinterpret_cast<uint32_t const*>(&source);
asm volatile( \
"{\n" \
".reg .b16 lo;\n" \
".reg .b16 hi;\n" \
"cvt.rn.satfinite.e4m3x2.f16x2 lo, %1;\n" \
"cvt.rn.satfinite.e4m3x2.f16x2 hi, %2;\n" \
"mov.b32 %0, {lo, hi};\n" \
"}" \
: "=r"(out) : "r"(src_packed[0]), "r"(src_packed[1]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<cutlass::half_t, 4> <=> Array<float_e5m2_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::half_t, 4> <= Array<float_e5m2_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<cutlass::half_t, cutlass::float_e5m2_t, Round> {
using result_element = cutlass::half_t;
using source_element = cutlass::float_e5m2_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out[2];
uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source);
asm volatile( \
"{\n" \
".reg .b16 lo, hi;\n" \
"mov.b32 {lo, hi}, %2;\n" \
"cvt.rn.f16x2.e5m2x2 %0, lo;\n" \
"cvt.rn.f16x2.e5m2x2 %1, hi;\n" \
"}\n" : "=r"(out[0]), "=r"(out[1]) : "r"(src_packed));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e5m2_t, 4> <= Array<cutlass::half_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e5m2_t, cutlass::half_t, Round> {
using result_element = cutlass::float_e5m2_t;
using source_element = cutlass::half_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out;
uint32_t const* src_packed = reinterpret_cast<uint32_t const*>(&source);
asm volatile( \
"{\n" \
".reg .b16 lo;\n" \
".reg .b16 hi;\n" \
"cvt.rn.satfinite.e5m2x2.f16x2 lo, %1;\n" \
"cvt.rn.satfinite.e5m2x2.f16x2 hi, %2;\n" \
"mov.b32 %0, {lo, hi};\n" \
"}" \
: "=r"(out) : "r"(src_packed[0]), "r"(src_packed[1]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<cutlass::bfloat16_t, 4> <=> Array<float_e4m3_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::bfloat16_t, 4> <= Array<float_e4m3_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<cutlass::bfloat16_t, cutlass::float_e4m3_t, Round> {
using result_element = cutlass::bfloat16_t;
using source_element = cutlass::float_e4m3_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
// Convert f8 to float
NumericArrayConverterPacked4Element<float, source_element, Round> src2float;
Array<float, 4> tmp_floats = src2float(source);
// Convert float to bf16
result_type out;
Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp_floats);
Array<result_element, 2>* packed_out = reinterpret_cast<Array<result_element, 2>*>(&out);
NumericArrayConverter<result_element, float, 2, Round> float2result;
packed_out[0] = float2result(packed_tmp[0]);
packed_out[1] = float2result(packed_tmp[1]);
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e4m3_t, 4> <= Array<cutlass::bfloat16_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e4m3_t, cutlass::bfloat16_t, Round> {
using result_element = cutlass::float_e4m3_t;
using source_element = cutlass::bfloat16_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
// Convert bf16 to float
Array<float, 4> tmp;
Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp);
Array<source_element, 2> const* packed_source = reinterpret_cast<Array<source_element, 2> const*>(&source);
NumericArrayConverter<float, source_element, 2, Round> src2float;
packed_tmp[0] = src2float(packed_source[0]);
packed_tmp[1] = src2float(packed_source[1]);
// Convert float to f8
NumericArrayConverterPacked4Element<result_element, float, Round> float2result;
return float2result(tmp);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<cutlass::bfloat16_t, 4> <=> Array<float_e5m2_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::bfloat16_t, 4> <= Array<float_e5m2_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<cutlass::bfloat16_t, cutlass::float_e5m2_t, Round> {
using result_element = cutlass::bfloat16_t;
using source_element = cutlass::float_e5m2_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
// Convert f8 to float
NumericArrayConverterPacked4Element<float, source_element, Round> src2float;
Array<float, 4> tmp_floats = src2float(source);
// Convert float to bf16
result_type out;
Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp_floats);
Array<result_element, 2>* packed_out = reinterpret_cast<Array<result_element, 2>*>(&out);
NumericArrayConverter<result_element, float, 2, Round> float2result;
packed_out[0] = float2result(packed_tmp[0]);
packed_out[1] = float2result(packed_tmp[1]);
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e5m2_t, 4> <= Array<cutlass::bfloat16_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e5m2_t, cutlass::bfloat16_t, Round> {
using result_element = cutlass::float_e5m2_t;
using source_element = cutlass::bfloat16_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
// Convert bf16 to float
Array<float, 4> tmp;
Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp);
Array<source_element, 2> const* packed_source = reinterpret_cast<Array<source_element, 2> const*>(&source);
NumericArrayConverter<float, source_element, 2, Round> src2float;
packed_tmp[0] = src2float(packed_source[0]);
packed_tmp[1] = src2float(packed_source[1]);
// Convert float to f8
NumericArrayConverterPacked4Element<result_element, float, Round> float2result;
return float2result(tmp);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<float_e4m3_t, 4> <=> Array<float_e5m2_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<float_e4m3_t, 4> <= Array<float_e5m2_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e4m3_t, cutlass::float_e5m2_t, Round> {
using result_element = cutlass::float_e4m3_t;
using source_element = cutlass::float_e5m2_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float_e5m2_t, 4> <= Array<float_e4m3_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverterPacked4Element<float_e5m2_t, cutlass::float_e4m3_t, Round> {
using result_element = cutlass::float_e5m2_t;
using source_element = cutlass::float_e4m3_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for:
// Array<T, N> <=> Array<float_e4m3_t, N>
// Array<T, N> <=> Array<float_e5m2_t, N>
// using packed converter under the hood
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename T,
typename S,
int N,
FloatRoundStyle Round
>
struct PackedNumericArrayConverter {
using result_element = T;
using source_element = S;
using result_type = Array<result_element, N>;
using source_type = Array<source_element, N>;
static FloatRoundStyle const round_style = Round;
private:
using packed_result_type = Array<result_element, 4>;
using packed_source_type = Array<source_element, 4>;
public:
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
result_type result;
packed_result_type* packed_result = reinterpret_cast<packed_result_type*>(&result);
const packed_source_type* packed_source = reinterpret_cast<const packed_source_type*>(&source);
detail::NumericArrayConverterPacked4Element<result_element, source_element, Round> packed_converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 4; ++i) {
packed_result[i] = packed_converter(packed_source[i]);
}
// Handle leftovers
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N % 4; ++i) {
int idx = ((N / 4) * 4) + i;
result[idx] = converter(source[idx]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
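// Illustrative usage (a hedged sketch; the element count 6 is chosen only for this example):
// converting six e4m3 values to float through the partial specializations below dispatches one
// packed group of four elements to detail::NumericArrayConverterPacked4Element and the remaining
// two elements to the scalar NumericConverter.
//
//   cutlass::NumericArrayConverter<float, cutlass::float_e4m3_t, 6> converter;
//   cutlass::Array<cutlass::float_e4m3_t, 6> src;   // filled elsewhere
//   cutlass::Array<float, 6> dst = converter(src);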
/// Partial specialization for Array<T, N> <= Array<float_e4m3_t, N>
template <
typename T,
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<T, cutlass::float_e4m3_t, N, Round> :
public PackedNumericArrayConverter<T, cutlass::float_e4m3_t, N, Round> {};
/// Partial specialization for Array<T, N> <= Array<float_e5m2_t, N>
template <
typename T,
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<T, cutlass::float_e5m2_t, N, Round> :
public PackedNumericArrayConverter<T, cutlass::float_e5m2_t, N, Round> {};
/// Partial specialization for Array<float_e4m3_t, N> <= Array<S, N>
template <
typename S,
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, S, N, Round> :
public PackedNumericArrayConverter<float_e4m3_t, S, N, Round> {};
/// Partial specialization for Array<float_e5m2_t, N> <= Array<S, N>
template <
typename S,
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, S, N, Round> :
public PackedNumericArrayConverter<float_e5m2_t, S, N, Round> {};
/// Partial specialization for Array<float_e4m3_t, N> <= Array<float_e5m2_t, N>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, cutlass::float_e5m2_t, N, Round> :
public PackedNumericArrayConverter<float_e4m3_t, cutlass::float_e5m2_t, N, Round> {};
/// Partial specialization for Array<float_e5m2_t, N> <= Array<float_e4m3_t, N>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, cutlass::float_e4m3_t, N, Round> :
public PackedNumericArrayConverter<float_e5m2_t, cutlass::float_e4m3_t, N, Round> {};
/// Partial specialization for Array<float_e4m3_t, N> <= Array<float_e4m3_t, N>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, cutlass::float_e4m3_t, N, Round> :
public PackedNumericArrayConverter<float_e4m3_t, cutlass::float_e4m3_t, N, Round> {};
/// Partial specialization for Array<float_e5m2_t, N> <= Array<float_e5m2_t, N>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, cutlass::float_e5m2_t, N, Round> :
public PackedNumericArrayConverter<float_e5m2_t, cutlass::float_e5m2_t, N, Round> {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<int8_t> <= Array<float>
/// Conversion is performed with saturation regardless of setting of
/// the `Round` template parameter.
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, float, 1, Round> {
using result_type = Array<int8_t, 1>;
using source_type = Array<float, 1>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericConverter<int8_t, float, Round> destination_converter;
result_type result;
result[0] = destination_converter(source[0]);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, float, 1, Round> {
using result_type = Array<uint8_t, 1>;
using source_type = Array<float, 1>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericConverter<uint8_t, float, Round> destination_converter;
result_type result;
result[0] = destination_converter(source[0]);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
// To convert FP32 to an integer type narrower than 32 bits, we first convert to int32_t.
template <
typename T,
int N,
FloatRoundStyle Round
>
struct NumericArrayFP32ToIntConverter {
using result_type = Array<T, N>;
using source_type = Array<float, N>;
static FloatRoundStyle const round_style = Round;
static_assert(cutlass::platform::numeric_limits<T>::is_integer, "The destination type must be an integer type.");
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
// Convert float to int
Array<int32_t, N> temporary;
NumericArrayConverter<int32_t, float, N, Round> compute_converter;
temporary = compute_converter(source);
// Convert int32_t to the destination integer type
NumericArrayConverter<T, int32_t, N, Round> destination_converter;
return destination_converter(temporary);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
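// Illustrative flow (for clarity only): NumericArrayConverter<int8_t, float, N> below forwards to
// this helper, which first rounds each float to int32_t and then narrows int32_t to the destination
// integer type with saturation (see the note on saturation above).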
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, float, N, Round> {
using result_type = Array<int8_t, N>;
using source_type = Array<float, N>;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayFP32ToIntConverter<int8_t, N, Round> converter;
return converter(source);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, float, N, Round> {
using result_type = Array<uint8_t, N>;
using source_type = Array<float, N>;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayFP32ToIntConverter<uint8_t, N, Round> converter;
return converter(source);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<int4b_t, float, N, Round> {
using result_type = Array<int4b_t, N>;
using source_type = Array<float, N>;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayFP32ToIntConverter<int4b_t, N, Round> converter;
return converter(source);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<uint4b_t, float, N, Round> {
using result_type = Array<uint4b_t, N>;
using source_type = Array<float, N>;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayFP32ToIntConverter<uint4b_t, N, Round> converter;
return converter(source);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750) && \
((__CUDACC_VER_MAJOR__ > 10) || \
((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2)))
/// Partial specialization for Array<int4b_t, 8> <= Array<int, 8>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int4b_t, int, 8, Round> {
using result_type = Array<int4b_t, 8>;
using source_type = Array<int, 8>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned out;
asm volatile(
"{ .reg .u32 r4;"
"cvt.pack.sat.s4.s32.b32 r4, %8, %7, 0;"
"cvt.pack.sat.s4.s32.b32 r4, %6, %5, r4;"
"cvt.pack.sat.s4.s32.b32 r4, %4, %3, r4;"
"cvt.pack.sat.s4.s32.b32 %0, %2, %1, r4;"
"}"
: "=r"(out)
: "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]),
"r"(source[4]), "r"(source[5]), "r"(source[6]), "r"(source[7]));
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<int4b_t> <= Array<int>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<int4b_t, int, N, Round> {
static_assert(!(N % 8), "N must be multiple of 8.");
using result_type = Array<int4b_t, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<int4b_t, int, 8, Round> convert_vector_;
result_type result;
Array<int4b_t, 8> *result_ptr = reinterpret_cast<Array<int4b_t, 8> *>(&result);
Array<int, 8> const *source_ptr = reinterpret_cast<Array<int, 8> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 8; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<uint4b_t, 8> <= Array<int, 8>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<uint4b_t, int, 8, Round> {
using result_type = Array<uint4b_t, 8>;
using source_type = Array<int, 8>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned out;
asm volatile(
"{ .reg .u32 r4;"
"cvt.pack.sat.u4.s32.b32 r4, %8, %7, 0;"
"cvt.pack.sat.u4.s32.b32 r4, %6, %5, r4;"
"cvt.pack.sat.u4.s32.b32 r4, %4, %3, r4;"
"cvt.pack.sat.u4.s32.b32 %0, %2, %1, r4;"
"}"
: "=r"(out)
: "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]),
"r"(source[4]), "r"(source[5]), "r"(source[6]), "r"(source[7]));
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<uint4b_t> <= Array<int>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<uint4b_t, int, N, Round> {
static_assert(!(N % 8), "N must be multiple of 8.");
using result_type = Array<uint4b_t, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<uint4b_t, int, 8, Round> convert_vector_;
result_type result;
Array<uint4b_t, 8> *result_ptr = reinterpret_cast<Array<uint4b_t, 8> *>(&result);
Array<int, 8> const *source_ptr = reinterpret_cast<Array<int, 8> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 8; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#endif // Conditional guards to enable partial specialization for packed integers
namespace detail {
/*
A helper class that can vectorize a numeric converter with implementations for several vector widths.
The vector widths must be given in decreasing order of width, and each width must be a power of 2.
The vector converters must produce identical results to the scalar converters for consistency.
*/
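// Illustrative dispatch (a hedged example; the values are chosen only for clarity): for a converter
// that registers 4-wide and 2-wide vector specializations and is instantiated with N = 7, convert()
// performs one 4-wide packed conversion (elements 0-3), one 2-wide packed conversion (elements 4-5),
// and the scalar base case then handles the single leftover element (element 6).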
class VectorizedConverter {
private:
// Base case to handle remainder elements as scalars.
template <int Offset, size_t ParentWidth, typename ArrayConverter>
CUTLASS_DEVICE
static void convert_helper(
typename ArrayConverter::result_type& result,
typename ArrayConverter::source_type const& source) {
using ElementRes = typename ArrayConverter::result_type::Element;
using ElementSrc = typename ArrayConverter::source_type::Element;
// If no more converters, handle the remaining elements as scalars.
constexpr int total_elements = ArrayConverter::result_type::kElements;
constexpr int remainder = total_elements - Offset;
static_assert(remainder == (total_elements % ParentWidth), "Unexpected remainder.");
typename ArrayConverter::ScalarConverter scalar_converter;
CUTLASS_PRAGMA_UNROLL
for (int i = Offset; i < ArrayConverter::result_type::kElements; ++i) {
result[i] = scalar_converter(ElementSrc(source[i]));
}
}
template <int Offset, size_t ParentWidth, typename ArrayConverter, typename ResultVectorArray, typename SourceVectorArray, typename... OtherVectorArrays>
CUTLASS_DEVICE
static void convert_helper(typename ArrayConverter::result_type& result, typename ArrayConverter::source_type const& source) {
static_assert(sizeof...(OtherVectorArrays) % 2 == 0, "Vector converters must come in {dst, src} pairs");
static_assert(ResultVectorArray::kElements == SourceVectorArray::kElements, "Vector converters must have the same vector width");
static_assert(cutlass::platform::is_same<typename ArrayConverter::result_type::Element, typename ResultVectorArray::Element>::value,
"ResultVectorArray must have the same type ArrayConverter::result_type");
static_assert(cutlass::platform::is_same<typename ArrayConverter::source_type::Element, typename SourceVectorArray::Element>::value,
"SourceVectorArray must have the same type ArrayConverter::result_type");
static_assert(Offset >= 0 && Offset <= ArrayConverter::result_type::kElements, "Offset must be between 0 and N");
static_assert(ParentWidth == 0 || ParentWidth > ResultVectorArray::kElements, "Vector arrays must be given in decreasing order of width");
constexpr int vector_width = ResultVectorArray::kElements;
static_assert(ispow2(vector_width), "Vector width must be a power of 2");
using ElementRes = typename ArrayConverter::result_type::Element;
using ElementSrc = typename ArrayConverter::source_type::Element;
constexpr int vector_bits_res = vector_width * cutlass::sizeof_bits<ElementRes>::value;
constexpr int vector_bits_src = vector_width * cutlass::sizeof_bits<ElementSrc>::value;
static_assert(vector_bits_res % 8 == 0, "Result vector type must be byte addressed.");
static_assert(vector_bits_src % 8 == 0, "Source vector type must be byte addressed.");
constexpr int vector_offset = Offset / vector_width;
ResultVectorArray* packed_result_vec = reinterpret_cast<ResultVectorArray*>(&result) + vector_offset;
SourceVectorArray const* packed_source_vec = reinterpret_cast<SourceVectorArray const*>(&source) + vector_offset;
// Convert the remaining elements as vectors.
constexpr int total_elements = ArrayConverter::result_type::kElements;
constexpr int groups_of_vec = (total_elements - Offset) / vector_width;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < groups_of_vec; ++i) {
packed_result_vec[i] = ArrayConverter::template packed_convert<ResultVectorArray, SourceVectorArray>(packed_source_vec[i]);
}
constexpr int new_offset = Offset + vector_width * groups_of_vec;
// Recurse to handle other vector converters, or the scalar base case.
convert_helper<new_offset, ResultVectorArray::kElements, ArrayConverter, OtherVectorArrays...>(result, source);
}
public:
/*
A method to convert vectors of elements using the packed_convert method of the converter.
Converters using this class must implement packed convert and support 1 or more vector conversions.
*/
template <typename ArrayConverter, typename ResultVectorArray, typename SourceVectorArray, typename... OtherVectorArrays>
CUTLASS_DEVICE
static void convert(typename ArrayConverter::result_type& result, typename ArrayConverter::source_type const& source) {
convert_helper<0, 0, ArrayConverter, ResultVectorArray, SourceVectorArray, OtherVectorArrays...>(result, source);
}
};
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::float_e4m3_t, N> <= Array<cutlass::int4b_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::float_e4m3_t, cutlass::int4b_t, N, Round> {
using result_type = Array<cutlass::float_e4m3_t, N>;
using source_type = Array<cutlass::int4b_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_8 = Array<cutlass::float_e4m3_t, 8>;
using result_type_packed_4 = Array<cutlass::float_e4m3_t, 4>;
using source_type_packed_8 = Array<cutlass::int4b_t, 8>;
using source_type_packed_4 = Array<cutlass::int4b_t, 4>;
using ScalarConverter = NumericConverter<cutlass::float_e4m3_t, cutlass::int4b_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_8 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
// The core converter uses a lookup table to convert i4 -> e4m3.
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_8>::value &&
platform::is_same<PackedResultType, result_type_packed_8>::value),
"Invalid PackedSrcType/PackedResultType must be 4 or 8 to use private convert dispatch.");
// Hold FP8 outputs in reg. We need 1 reg for every 4 outputs.
cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 4, sizeof(PackedResultType)> r;
// View the input as reg
uint32_t reg = to_reg(source);
// Determines whether to select from the positive or negative candidates
uint32_t sign = (reg & 0x88888888) >> 1;
// Ignore sign bit when indexing into LUT
uint32_t lut_idx = (reg & 0x77777777);
// The sign bits are OR'd into the base selector 0x32103210 so that the final prmt picks the negative candidate for negative inputs
const uint32_t final_prmt_base = 0x32103210;
// [0, 1, 2, 3] encoded as FP8
static constexpr uint32_t POS_E4M3s_REG1 = 0x44403800;
// [4, 5, 6, 7] encoded as FP8
static constexpr uint32_t POS_E4M3s_REG2 = 0x4E4C4A48;
// [-8, -7, -6, -5] encoded as FP8
static constexpr uint32_t NEG_E4M3s_REG1 = 0xCACCCED0;
// [-4, -3, -2, -1] encoded as FP8
static constexpr uint32_t NEG_E4M3s_REG2 = 0xB8C0C4C8;
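// Illustrative walkthrough (not part of the algorithm, provided for clarity): for an input nibble
// x = -3 (0b1101), lut_idx selects entry 5, so the candidate lookups produce the pair
// {+5 encoded as 0x4A, -3 encoded as 0xC4}. The sign bit, shifted into the 0x4 position of the final
// selector, makes the last prmt pick the negative candidate, so the output byte is 0xC4 == e4m3(-3).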
const int iters = PackedSrcType::kElements / 4;
#pragma unroll
for (int ii = 0; ii < iters; ++ii, lut_idx >>=16, sign >>=16) {
uint32_t final_prmt_idx = final_prmt_base | sign;
// This uses a lookup table to convert packed int4s to packed fp8s, using the int4 value
// as the index to prmt.
// It first selects both the positive and negative candidates, then uses the sign bit to
// select the correct candidate.
asm volatile(
"{\n"
" .reg .b32 pos_f8s, neg_f8s;\n"
" prmt.b32 pos_f8s, %1, %2, %5;\n"
" prmt.b32 neg_f8s, %3, %4, %5;\n"
" prmt.b32 %0, pos_f8s, neg_f8s, %6;\n"
"}\n"
: "=r"(r[ii])
: "n"(POS_E4M3s_REG1), "n"(POS_E4M3s_REG2), "n"(NEG_E4M3s_REG1), "n"(NEG_E4M3s_REG2),
"r"(lut_idx), "r"(final_prmt_idx));
}
return reinterpret_cast<PackedResultType&>(r);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_8, source_type_packed_8,
result_type_packed_4, source_type_packed_4>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float, N> <= Array<cutlass::int4b_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<float, cutlass::int4b_t, N, Round> {
using result_type = Array<float, N>;
using source_type = Array<cutlass::int4b_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_8 = Array<float, 8>;
using result_type_packed_4 = Array<float, 4>;
using result_type_packed_2 = Array<float, 2>;
using source_type_packed_8 = Array<cutlass::int4b_t, 8>;
using source_type_packed_4 = Array<cutlass::int4b_t, 4>;
using source_type_packed_2 = Array<cutlass::int4b_t, 2>;
using ScalarConverter = NumericConverter<float, cutlass::int4b_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint8_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_8 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
template <int offset, int elements_to_convert, typename PackedResultType>
CUTLASS_DEVICE
static void packed_convert_vec(PackedResultType& result, uint32_t src_reg) {
static_assert(offset == 0 || offset == 4, "Invalid offset");
// Selects one of the bottom int4s and constructs:
// 8388608 + (x + 8)
// 8388608 + 16 * (x + 8)
// 8388608 + 256 * (x + 8)
// 8388608 + 4096 * (x + 8)
uint32_t const and_masks[4] = {0x0000000F, 0x000000F0, 0x00000F00, 0x0000F000};
uint32_t const xor_masks[4] = {0x4B000008, 0x4B000080, 0x4B000800, 0x4B008000};
float const scales[4] = {1.f, 1.f / 16.f, 1.f / 256.f, 1.f / 4096.f};
float const offsets[4] = {-8388616.f, -524296.f, -32776.f, -2056.f};
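// Illustrative check of the bit trick (for clarity only): for x = -3 the low nibble is 0b1101,
// so (reg & 0x0000000F) ^ 0x4B000008 == 0x4B000005, whose FP32 value is 8388608 + 5 = 8388613.
// The fma below with scale 1 and offset -8388616 then recovers 8388613 - 8388616 == -3.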
static constexpr uint32_t immLut = (0xf0 & 0xcc) ^ 0xaa;
uint32_t* result_as_int = reinterpret_cast<uint32_t*>(&result);
// For each operand, computes:
// r[i] = (r[i] & and_mask) ^ xor_mask
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < elements_to_convert; ++ii) {
asm volatile(
"{\n"
" lop3.b32 %0, %1, %2, %3, %4;\n"
"}\n"
: "=r"(result_as_int[offset + ii])
: "r"(src_reg), "r"(and_masks[ii]), "r"(xor_masks[ii]), "n"(immLut));
result[offset + ii] = __fmaf_rn(result[offset + ii], scales[ii], offsets[ii]);
}
}
// The core converter uses bit tricks to construct a known FP32 number, then uses an
// FMA in FP32 to recover the original value.
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_8>::value &&
platform::is_same<PackedResultType, result_type_packed_8>::value),
"Invalid PackedSrcType/PackedResultType must be 1, 2, 4 or 8 to use private convert dispatch.");
// Hold the output FP32s directly in the packed result array
PackedResultType r;
// View the input as reg
uint32_t src_reg = to_reg(source);
constexpr int total_elements = PackedResultType::kElements == 8 ? 4 : PackedResultType::kElements;
packed_convert_vec<0, total_elements>(r, src_reg);
if (PackedResultType::kElements == 8) {
uint32_t src_reg_shifted = src_reg >> 16;
packed_convert_vec<4, 4>(r, src_reg_shifted);
}
return r;
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_8, source_type_packed_8,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float, N> <= Array<int8_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<float, int8_t, N, Round> {
using result_type = Array<float, N>;
using source_type = Array<int8_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_4 = Array<float, 4>;
using result_type_packed_2 = Array<float, 2>;
using source_type_packed_4 = Array<int8_t, 4>;
using source_type_packed_2 = Array<int8_t, 2>;
using ScalarConverter = NumericConverter<float, int8_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value),
"Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch.");
PackedResultType r;
// View the input as reg
uint32_t src_reg = to_reg(source);
static constexpr int fp32_base = 0x4B400000;
uint32_t const prmt_indices[4] = {0x8880, 0x9991, 0xAAA2, 0xBBB3};
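// Illustrative check (for clarity only): for x = -5 the source byte is 0xFB; prmt with selector
// 0x8880 sign-extends it to 0xFFFFFFFB (-5 as int32). Adding fp32_base in integer arithmetic yields
// 0x4B3FFFFB, the FP32 bit pattern of 12582912 - 5; subtracting 12582912.0f below recovers -5.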
int* result_as_int = reinterpret_cast<int*>(&r);
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < PackedResultType::kElements; ++ii) {
asm volatile("prmt.b32 %0,%1,%1,%2;\n" : "=r"(result_as_int[ii]) : "r"(src_reg), "r"(prmt_indices[ii]));
}
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < PackedResultType::kElements; ++ii)
{
result_as_int[ii] += fp32_base;
r[ii] -= reinterpret_cast<const float&>(fp32_base);
}
return r;
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<float, N> <= Array<uint8_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<float, uint8_t, N, Round> {
using result_type = Array<float, N>;
using source_type = Array<uint8_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_4 = Array<float, 4>;
using result_type_packed_2 = Array<float, 2>;
using source_type_packed_4 = Array<uint8_t, 4>;
using source_type_packed_2 = Array<uint8_t, 2>;
using ScalarConverter = NumericConverter<float, uint8_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value),
"Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch.");
PackedResultType r;
// View the input as reg
uint32_t src_reg = to_reg(source);
// __byte_perm inserts each u8 element into the low byte of the FP32 magic number 0x4B000000
// (equivalent to adding 0x4B000000 to each element) and stores the result in r, without
// introducing an extra cvt.u32.u8 instruction
uint32_t const prmt_indices[4] = {0x7650, 0x7651, 0x7652, 0x7653};
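// Illustrative check (for clarity only): for u8 == 200 (0xC8), __byte_perm builds 0x4B0000C8,
// the FP32 bit pattern of 8388608 + 200; subtracting 8388608.f below then recovers 200.0f.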
uint32_t* result_as_int = reinterpret_cast<uint32_t*>(&r);
for (int ii = 0; ii < PackedResultType::kElements; ++ii) {
result_as_int[ii] = __byte_perm(src_reg, 0x4B000000, prmt_indices[ii]);
// Subtract the magic number (0x4B000000 == 8388608.0f) from r[ii] in floating-point arithmetic to obtain the final result
r[ii] -= 8388608.f;
}
return r;
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::half_t, N> <= Array<cutlass::int4b_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::half_t, cutlass::int4b_t, N, Round> {
using result_type = Array<cutlass::half_t, N>;
using source_type = Array<cutlass::int4b_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_8 = Array<cutlass::half_t, 8>;
using result_type_packed_4 = Array<cutlass::half_t, 4>;
using result_type_packed_2 = Array<cutlass::half_t, 2>;
using source_type_packed_8 = Array<cutlass::int4b_t, 8>;
using source_type_packed_4 = Array<cutlass::int4b_t, 4>;
using source_type_packed_2 = Array<cutlass::int4b_t, 2>;
using ScalarConverter = NumericConverter<cutlass::half_t, cutlass::int4b_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint8_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_8 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
// The core converter uses bit tricks to construct a known FP16 number, then does a
// subtraction in FP16 for the final result.
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_8>::value &&
platform::is_same<PackedResultType, result_type_packed_8>::value),
"Invalid PackedSrcType/PackedResultType must be 2, 4 or 8 to use private convert dispatch.");
// Hold output FP16s in reg. We need 1 reg for every 2 elements
using RegArray = cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 2, sizeof(PackedResultType)>;
RegArray r;
// View the input as reg
uint32_t src_reg = to_reg(source);
// Below constructs the following temporary:
// fp16s_01 = {0x00, i4_01, 0x00, i4_01}
// fp16s_23 = {0x00, i4_23, 0x00, i4_23}
// fp16s_45 = {0x00, i4_45, 0x00, i4_45}
// fp16s_67 = {0x00, i4_67, 0x00, i4_67}
// We use inline asm instead of __byte_perm intrinsic since we don't want the documented (& 0x7) on the index. NVCC
// might be able to optimize it out since the index is a constexpr, but we choose to be safe about it here.
uint32_t prmt_indices[4] = {0x4040, 0x4141, 0x4242, 0x4343};
static_assert(RegArray::kElements <= 4, "Too many inputs for I4 -> F16 vector converter");
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
asm volatile(
"{\n"
" prmt.b32 %0, %1, %2, %3;\n"
"}\n"
: "=r"(r[ii])
: "r"(src_reg), "n"(0), "r"(prmt_indices[ii]));
}
// The below XOR does the following:
// 1) Sets the exponent bits of the FP16 to the correct value for the FP16 magic number. We will be constructing
//    1024 + (x + 8) in the low half and 1024 + 16 * (x + 8) in the high half, then using hfma to rescale and
//    subtract the bias so that only x remains
// 2) Adds 8 to the int4 value that we will process in the FP16 (for uint4, we can simply avoid this step)
// The AND does the following:
// 1) Clears the bits of the int4 we will ignore
// We use lop3 so that we can use 1 instruction for AND and XOR.
static constexpr uint32_t xor_mask = 0x64806408;
static constexpr uint32_t and_mask = 0xFFF0FF0F;
static constexpr uint32_t immLut = (0xf0 & 0xcc) ^ 0xaa;
// For each operand, computes:
// r[i] = (r[i] & and_mask) ^ xor_mask
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
asm volatile(
"{\n"
" lop3.b32 %0, %0, %1, %2, %3;\n"
"}\n"
: "+r"(r[ii])
: "n"(and_mask), "n"(xor_mask), "n"(immLut));
}
// We will issue 2 hfmas that do the following:
// For the high FP16:
// Divide by 16 {packed as a operand} to get:
// 64 + (x + 8)
// x + 72
// Subtract 72 {packed as c operand} to get x
// For the low FP16:
// 1024 + (x + 8)
// x + 1032
// So, we subtract 1032 {packed as c operand} to get x
// {-72, -1032}
static constexpr uint32_t hfma_bias_rep = 0xD480E408;
// {1 / 16, 1}
static constexpr uint32_t hfma_scale_rep = 0x2C003C00;
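// Illustrative check (for clarity only): for x0 = -3 the low nibble is 0b1101, so after the lop3
// the low FP16 lane holds 0x6405 == 1024 + 5 == 1029. The hfma below then computes
// 1.0 * 1029 + (-1032) == -3, recovering the original value.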
const half2& hfma_bias = reinterpret_cast<const half2&>(hfma_bias_rep);
const half2& hfma_scale = reinterpret_cast<const half2&>(hfma_scale_rep);
// Scale and subtract the FP16s to get the original int4 number as FP16.
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
half2& fp16x2_val = reinterpret_cast<__half2&>(r[ii]);
fp16x2_val = __hfma2(hfma_scale, fp16x2_val, hfma_bias);
}
return reinterpret_cast<PackedResultType&>(r);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_8, source_type_packed_8,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<cutlass::half_t, N> <= Array<int8_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::half_t, int8_t, N, Round> {
using result_type = Array<cutlass::half_t, N>;
using source_type = Array<int8_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_4 = Array<cutlass::half_t, 4>;
using result_type_packed_2 = Array<cutlass::half_t, 2>;
using source_type_packed_4 = Array<int8_t, 4>;
using source_type_packed_2 = Array<int8_t, 2>;
using ScalarConverter = NumericConverter<cutlass::half_t, int8_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
// The core converter uses bit tricks to construct a known FP16 number, then does a
// subtraction in FP16 for the final result.
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value),
"Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch.");
// Hold output FP16s in reg. We need 1 reg for every 2 elements
using RegArray = cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 2, sizeof(PackedResultType)>;
RegArray r;
#if 0 // Scalar conversion (please keep this code as a reference for the vectorized version below)
auto result = reinterpret_cast<PackedResultType&>(r);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < PackedResultType::kElements; ++i) {
int16_t tmp = source[i] + 26112 /* 0x6600 */;
result[i] = reinterpret_cast<cutlass::half_t const &>(tmp) - 1536.0_hf;
}
#endif
// View the input as reg
uint32_t src_reg = to_reg(source);
uint32_t const prmt_indices[2] = {0x9180, 0xB3A2};
// Pack s8x2 (s8[1], s8[0]) -> s16x2 (sext.s8[1], sext.s8[0])
// (See https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-prmt)
// The inline PTX below uses the sign-extension selector modes (`msb=0` and `msb=1` from the above link) to
// sign-extend bytes 0, 1 of the s8x4 source into r[0] and bytes 2, 3 into r[1]; the replicated sign bits land
// in bits 08-15 and 24-31 of each register.
// Note that `__byte_perm(src_reg, src_reg, 0x9180);` won't achieve the same result, since the intrinsic doesn't sign-extend the sign bit.
// Thus, we use the inline ptx `prmt.b32` instruction for the desired sign extension from s8x2 to s16x2.
for (int ii = 0; ii < RegArray::kElements; ++ii) {
asm volatile("prmt.b32 %0,%1,%1,%2;\n" : "=r"(r[ii]) : "r"(src_reg), "r"(prmt_indices[ii]));
}
// In the absence of an add.s16x2 instruction, use bit-wise operations to execute signed addition with magic
// numbers, achieving the same result as an add.s16x2 instruction.
// (See https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#logic-and-shift-instructions-lop3)
// For a logical operation F(a, b, c) the value of kImmLut can be computed by applying the same operation to
// three predefined constant values as follows:
// ta = 0xF0;
// tb = 0xCC;
// tc = 0xAA;
// kImmLut = F(ta, tb, tc);
// If we want F = ((a & b) ^ c) then set kImmLut = (0xF0 & 0xCC) ^ 0xAA
static constexpr uint32_t kImmLut = (0xF0 & 0xCC) ^ 0xAA;
for (int ii = 0; ii < RegArray::kElements; ++ii) {
// The bit-wise operation executed below is `r[ii] = (r[ii] & 0x03FF03FF) ^ 0x66006600;`
asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n" :
"=r"(r[ii]) : "r"(r[ii]), "n"(0x03FF03FF), "n"(0x66006600), "n"(kImmLut));
}
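// Illustrative check (for clarity only): for x = -5 the sign-extended s16 is 0xFFFB;
// (0xFFFB & 0x03FF) ^ 0x6600 == 0x65FB, which is the FP16 value 1531.0.
// Subtracting the 1536.0 bias below recovers -5.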
static constexpr uint32_t bias_rep = 0x66006600;
const half2& bias = reinterpret_cast<const half2&>(bias_rep);
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
half2& fp16x2_val = reinterpret_cast<__half2&>(r[ii]);
fp16x2_val = __hsub2(fp16x2_val, bias);
}
return reinterpret_cast<PackedResultType&>(r);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
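// Illustrative usage of the specialization above (a sketch; the array size and contents are placeholders):
//
//   cutlass::Array<int8_t, 16> quantized = ...;                            // packed int8 source
//   cutlass::NumericArrayConverter<cutlass::half_t, int8_t, 16> convert;
//   cutlass::Array<cutlass::half_t, 16> dequantized = convert(quantized);  // dispatches to the packed_convert paths above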
/// Partial specialization for Array<cutlass::half_t, N> <= Array<uint8_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::half_t, uint8_t, N, Round> {
using result_type = Array<cutlass::half_t, N>;
using source_type = Array<uint8_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_4 = Array<cutlass::half_t, 4>;
using result_type_packed_2 = Array<cutlass::half_t, 2>;
using source_type_packed_4 = Array<uint8_t, 4>;
using source_type_packed_2 = Array<uint8_t, 2>;
using ScalarConverter = NumericConverter<cutlass::half_t, uint8_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value),
"Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch.");
// Hold output FP16s in reg. We need 1 reg for every 2 elements
using RegArray = cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 2, sizeof(PackedResultType)>;
RegArray r;
// View the input as reg
uint32_t src_reg = to_reg(source);
uint32_t const prmt_indices[2] = {0x5150, 0x5352};
static constexpr uint32_t start_byte_for_fp16 = 0x64646464;
for (int ii = 0; ii < RegArray::kElements; ++ii) {
asm volatile("prmt.b32 %0,%1,%2,%3;\n" : "=r"(r[ii]) : "r"(src_reg), "n"(start_byte_for_fp16), "r"(prmt_indices[ii]));
}
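    // Each prmt above interleaves one source byte with 0x64, producing FP16 bit patterns of the form 0x64XX.
    // Since 0x6400 is the FP16 value 1024.0 and one ulp in this range is 1, each lane now holds 1024 + x;
    // subtracting the 1024.0 bias below recovers x. Worked example (illustrative): x = 200 = 0xC8
    // gives 0x64C8 = 1224.0, and 1224.0 - 1024.0 = 200.0.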
static constexpr uint32_t bias_rep = 0x64006400;
const half2& bias = reinterpret_cast<const half2&>(bias_rep);
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
half2& fp16x2_val = reinterpret_cast<__half2&>(r[ii]);
fp16x2_val = __hsub2(fp16x2_val, bias);
}
return reinterpret_cast<PackedResultType&>(r);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<cutlass::bfloat16_t, N> <= Array<cutlass::int4b_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::bfloat16_t, cutlass::int4b_t, N, Round> {
using result_type = Array<cutlass::bfloat16_t, N>;
using source_type = Array<cutlass::int4b_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_8 = Array<cutlass::bfloat16_t, 8>;
using result_type_packed_4 = Array<cutlass::bfloat16_t, 4>;
using result_type_packed_2 = Array<cutlass::bfloat16_t, 2>;
using source_type_packed_8 = Array<cutlass::int4b_t, 8>;
using source_type_packed_4 = Array<cutlass::int4b_t, 4>;
using source_type_packed_2 = Array<cutlass::int4b_t, 2>;
using ScalarConverter = NumericConverter<cutlass::bfloat16_t, cutlass::int4b_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint8_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_8 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
  // The core converter uses bit tricks to construct a known BF16 number, then does a
  // subtraction in BF16 for the final result.
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_8>::value &&
platform::is_same<PackedResultType, result_type_packed_8>::value),
"Invalid PackedSrcType/PackedResultType must be 2, 4 or 8 to use private convert dispatch.");
    // Hold output BF16s in reg. We need 1 reg for every 2 elements
using RegArray = cutlass::AlignedArray<uint32_t, PackedResultType::kElements / 2, sizeof(PackedResultType)>;
RegArray r;
// View the input as reg
uint32_t src_reg = to_reg(source);
uint32_t src_reg_shifted = src_reg >> 4;
    // The permutes below combine src_reg and src_reg_shifted so that each 4-bit element sits in the low nibble of its own 16-bit lane (the and_mask below discards the remaining bits).
uint32_t const prmt_indices[4] = {0xF4F0, 0xF5F1, 0xF6F2, 0xF7F3};
    static_assert(RegArray::kElements <= 4, "Too many inputs for I4 -> BF16 vector converter");
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
asm volatile(
"{\n"
" prmt.b32 %0, %1, %2, %3;\n"
"}\n"
: "=r"(r[ii])
: "r"(src_reg), "r"(src_reg_shifted), "r"(prmt_indices[ii]));
}
    // The lop3 below masks each lane down to its low 4 bits and XORs in the BF16 magic number:
    // it sets the exponent bits of the BF16 magic_num and flips the nibble's sign bit, so each lane
    // holds the BF16 value 128 + (x + 8) = 136 + x; subtracting 136 afterwards yields x.
static constexpr uint32_t xor_mask = 0x43084308;
static constexpr uint32_t and_mask = 0x000F000F;
static constexpr uint32_t immLut = (0xf0 & 0xcc) ^ 0xaa;
// For each operand, computes:
// r[i] = (r[i] & and_mask) ^ xor_mask
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
asm volatile(
"{\n"
" lop3.b32 %0, %0, %1, %2, %3;\n"
"}\n"
: "+r"(r[ii])
: "n"(and_mask), "n"(xor_mask), "n"(immLut));
}
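    // Worked example (illustrative): x = -5 is the nibble 0b1011 = 11; 11 ^ 8 = 3, so the lane becomes
    // 0x4303, which is the BF16 value 128 * (1 + 3/128) = 131 = 136 + (-5). Subtracting the 136 bias
    // below recovers -5.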
    // Subtract the bias from both BF16 lanes to recover x:
    //   hi_bf16 - 136, lo_bf16 - 136
    // bias_rep is the BF16 pair {136, 136} represented as an integer.
static constexpr uint32_t bias_rep = 0x43084308;
const __nv_bfloat162& bias = reinterpret_cast<const __nv_bfloat162&>(bias_rep);
CUTLASS_PRAGMA_UNROLL
for (int ii = 0; ii < RegArray::kElements; ++ii) {
__nv_bfloat162& bf16x2_val = reinterpret_cast<__nv_bfloat162&>(r[ii]);
bf16x2_val = __hsub2(bf16x2_val, bias);
}
return reinterpret_cast<PackedResultType&>(r);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_8, source_type_packed_8,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<cutlass::bfloat16_t, N> <= Array<int8_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::bfloat16_t, int8_t, N, Round> {
using result_type = Array<cutlass::bfloat16_t, N>;
using source_type = Array<int8_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_4 = Array<cutlass::bfloat16_t, 4>;
using result_type_packed_2 = Array<cutlass::bfloat16_t, 2>;
using source_type_packed_4 = Array<int8_t, 4>;
using source_type_packed_2 = Array<int8_t, 2>;
using ScalarConverter = NumericConverter<cutlass::bfloat16_t, int8_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value),
"Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch.");
NumericArrayConverter<float, int8_t, PackedResultType::kElements, Round> convert_int8_to_f32;
Array<float, PackedResultType::kElements> tmp = convert_int8_to_f32(source);
NumericArrayConverter<cutlass::bfloat16_t, float, PackedResultType::kElements, Round> convert_f32_to_bf16;
return convert_f32_to_bf16(tmp);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
/// Partial specialization for Array<cutlass::bfloat16_t, N> <= Array<uint8_t, N>
template <FloatRoundStyle Round, int N>
struct NumericArrayConverter<cutlass::bfloat16_t, uint8_t, N, Round> {
using result_type = Array<cutlass::bfloat16_t, N>;
using source_type = Array<uint8_t, N>;
static FloatRoundStyle const round_style = Round;
private:
using result_type_packed_4 = Array<cutlass::bfloat16_t, 4>;
using result_type_packed_2 = Array<cutlass::bfloat16_t, 2>;
using source_type_packed_4 = Array<uint8_t, 4>;
using source_type_packed_2 = Array<uint8_t, 2>;
using ScalarConverter = NumericConverter<cutlass::bfloat16_t, uint8_t, Round>;
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_2 const& source) {
return static_cast<uint32_t>(
reinterpret_cast<const uint16_t&>(source));
}
CUTLASS_DEVICE
static uint32_t to_reg(source_type_packed_4 const& source) {
return reinterpret_cast<const uint32_t&>(source);
}
template <typename PackedResultType, typename PackedSrcType>
CUTLASS_DEVICE
static PackedResultType packed_convert(PackedSrcType const &source) {
static_assert((platform::is_same<PackedSrcType, source_type_packed_2>::value &&
platform::is_same<PackedResultType, result_type_packed_2>::value) ||
(platform::is_same<PackedSrcType, source_type_packed_4>::value &&
platform::is_same<PackedResultType, result_type_packed_4>::value),
"Invalid PackedSrcType/PackedResultType must be 2 or 4 to use private convert dispatch.");
NumericArrayConverter<float, uint8_t, PackedResultType::kElements, Round> convert_uint8_to_f32;
Array<float, PackedResultType::kElements> tmp = convert_uint8_to_f32(source);
NumericArrayConverter<cutlass::bfloat16_t, float, PackedResultType::kElements, Round> convert_f32_to_bf16_;
return convert_f32_to_bf16_(tmp);
}
friend class detail::VectorizedConverter;
public:
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
using ConverterType = NumericArrayConverter<typename result_type::Element, typename source_type::Element, N, Round>;
detail::VectorizedConverter::convert<ConverterType,
result_type_packed_4, source_type_packed_4,
result_type_packed_2, source_type_packed_2>(result, source);
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const {
return convert(s);
}
};
#endif // defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// FastNumericArrayConverter only works when the source values lie within the limited central range
/// covered by the magic-number constants in the specializations below.
/// Conversion operator for Array. See the comments before
/// FastLinearCombinationClamp.
template <typename T, typename S, int N,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
typename Enable = void>
struct FastNumericArrayConverter {
using result_type = Array<T, N>;
using source_type = Array<S, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const &s) {
NumericArrayConverter<T, S, N, Round> convert_;
return convert_(s);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const { return convert(s); }
};
/// Partial specialization for Array<float> <= Array<int>
template <int N, FloatRoundStyle Round>
struct FastNumericArrayConverter<float, int, N, Round> {
using result_type = Array<float, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
int tmp = source[i] + 1262485504 /*0x4B400000*/;
result[i] = reinterpret_cast<float const &>(tmp) - 12582912.0f;
}
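    // The loop above relies on the following trick (worked example, illustrative): 0x4B400000 is the float
    // 12582912.0f = 1.5 * 2^23, whose ulp is exactly 1. Adding an int in a limited range to the bit pattern
    // therefore adds the same amount to the float value, e.g. tmp = -7 + 0x4B400000 represents 12582905.0f,
    // and 12582905.0f - 12582912.0f = -7.0f.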
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const { return convert(s); }
};
/// Partial specialization for Array<int8_t, 4> <= Array<float, 4>
template <FloatRoundStyle Round>
struct FastNumericArrayConverter<int8_t, float, 4, Round> {
using result_type = Array<int8_t, 4>;
using source_type = Array<float, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
Array<int32_t, 4> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
float tmp = source[i] + 12582912.0f;
result[i] = reinterpret_cast<int32_t const &>(tmp);
}
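    // After the additions above, each float holds its rounded int8 result in the low byte of the mantissa;
    // the three __byte_perm steps below gather those four low bytes into a single 32-bit word.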
result[0] = __byte_perm(result[0], result[1], 0x40);
result[2] = __byte_perm(result[2], result[3], 0x40);
result[0] = __byte_perm(result[0], result[2], 0x5410);
return reinterpret_cast<result_type const &>(result[0]);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const { return convert(s); }
};
/// Partial specialization for Array<int8_t> <= Array<float>
template <int N, FloatRoundStyle Round>
struct FastNumericArrayConverter<int8_t, float, N, Round> {
  static_assert(!(N % 4), "N must be a multiple of 4.");
using result_type = Array<int8_t, N>;
using source_type = Array<float, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
FastNumericArrayConverter<int8_t, float, 4, Round> convert_vector_;
result_type result;
Array<int8_t, 4> *result_ptr =
reinterpret_cast<Array<int8_t, 4> *>(&result);
Array<float, 4> const *source_ptr =
reinterpret_cast<Array<float, 4> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 4; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) const { return convert(s); }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines preferred rounding mode for a pair of types
template <typename T, typename S>
struct PreferredRoundingMode {
static FloatRoundStyle const kRound = FloatRoundStyle::round_to_nearest;
};
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 900
/// Defines preferred rounding mode for a pair of types
template <>
struct PreferredRoundingMode<cutlass::tfloat32_t, float> {
static FloatRoundStyle const kRound = FloatRoundStyle::round_half_ulp_truncate;
};
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Packs predicates into an array.
template <int N>
struct PackPredicates {
using result_type = Array<uint1b_t, N>;
static_assert(!(N % 4), "Must pack predicates in a count that is a multiple of 4");
CUTLASS_HOST_DEVICE
result_type operator()(bool const predicates[]) {
result_type packed;
packed.clear();
int const kWordSize = 8;
uint8_t *bytes = reinterpret_cast<uint8_t *>(packed.data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
int word_idx = (i / kWordSize);
int bit_idx = (i % kWordSize);
uint8_t mask = static_cast<uint8_t>((predicates[i] ? 1u : 0u) << bit_idx);
bytes[word_idx] = (bytes[word_idx] | mask);
}
return packed;
}
};
/// Unpacks predicates from an array
template <int N>
struct UnpackPredicates {
using result_type = Array<uint1b_t, N>;
static_assert(!(N % 4), "Must unpack predicates in a count that is a multiple of 4");
CUTLASS_HOST_DEVICE
void operator()(bool predicates[], result_type const &packed) {
int const kWordSize = 8;
uint8_t const *bytes = reinterpret_cast<uint8_t const *>(packed.data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
int word_idx = (i / kWordSize);
int bit_idx = (i % kWordSize);
predicates[i] = bool((bytes[word_idx] >> bit_idx) & 0x1);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/numeric_conversion.h/0 | {
"file_path": "include/cutlass/numeric_conversion.h",
"repo_id": "include",
"token_count": 50524
} | 37 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Kernel performing a reduction over one or more ranks of an affine tensor
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/fast_math.h"
#include "cutlass/numeric_types.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/device_kernel.h"
#include "cutlass/reduction/thread/reduction_operators.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reduction {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Parameters structure
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (i.e. number of outer ranks)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
struct TensorReductionAffineContiguousParams {
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
Coord<kRank> extent; /// Extent of source tensor
FastDivmodU64 divmod[kRank - 1]; /// FastDivmod by each strided rank
int64_t dst_stride[kReducedRank]; /// stride (units of bytes) - I, J
int64_t src_stride[kRank - 1]; /// stride (units of bytes) - I, J, K
int64_t workspace_stride; /// stride (units of bytes) between workspace
int workspace_count; /// number of workspaces
uint64_t inner_count; /// Number of elements in reduced index space
uint64_t outer_count; /// Number of elements in outer index space
ElementOutput * destination; /// Pointer to output tensor of rank kReducedRank
ElementSource const * source; /// Pointer to source pointer of rank kRank
ReductionOp reduction_op; /// Reduction operator
ElementCompute reduction_identity; /// Identity element used by reduction operator
ElementCompute *device_workspace; /// Pointer to device workspace for inter-CTA reductions
//
// Methods
//
/// Ctor
CUTLASS_HOST_DEVICE
TensorReductionAffineContiguousParams() {
}
/// Ctor
TensorReductionAffineContiguousParams(
Coord<kRank> extent_, ///< Extent of source tensor
ElementOutput * dst_ptr_, ///< Output tensor data
int64_t dst_stride_[], ///< Stride (units of elements)
ElementSource const * src_ptr_, ///< Source tensor data
int64_t src_stride_[], ///< Stride (units of elements)
ElementCompute *device_workspace_, ///< Pointer to device workspace for inter-CTA reductions
int64_t workspace_stride_, ///< Stride between workspaces
int workspace_count_, ///< Number of workspaces
ReductionOp reduction_op_, ///< Reduction operator
ElementCompute reduction_identity_ = ElementCompute() ///< Identity element used by reduction operator
):
extent(extent_),
inner_count(1),
outer_count(1),
destination(dst_ptr_),
source(src_ptr_),
device_workspace(device_workspace_),
workspace_stride(workspace_stride_),
workspace_count(workspace_count_),
reduction_op(reduction_op_),
reduction_identity(reduction_identity_) {
// Initialize divisors for fast div-mod
for (int p = 1; p < kRank; ++p) {
divmod[p - 1] = FastDivmodU64(uint64_t(extent[p]));
}
int input_size_bits = sizeof_bits<ElementSource>::value;
int output_size_bits = sizeof_bits<ElementOutput>::value;
// Compute strides in units of bytes
for (int p = 0; p < kReducedRank; ++p) {
dst_stride[p] = dst_stride_[p] * output_size_bits / 8;
}
for (int p = 0; p < kRank - 1; ++p) {
src_stride[p] = src_stride_[p] * input_size_bits / 8;
}
// Compute number of elements in strided ranks
for (int p = 0; p < kReducedRank; ++p) {
outer_count *= uint64_t(extent[p]);
}
for (int p = 0; p < kInnerRank; ++p) {
inner_count *= uint64_t(extent[kRank - 1 - p]);
}
}
};
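// Illustrative instantiation (a sketch; the concrete types and ranks below are assumptions, not mandated by this header):
//
//   using ReductionOp = cutlass::plus<float>;
//   using Params = TensorReductionAffineContiguousParams<
//       /*Rank=*/4, /*ReducedRank=*/2, /*ElementOutput=*/float, /*ElementSource=*/cutlass::half_t,
//       ReductionOp, /*VectorLength=*/8>;
//
// This describes reducing the innermost Rank - ReducedRank = 2 ranks (including the contiguous one) of a
// rank-4 half-precision tensor into a rank-2 float tensor, accumulating in float.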
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to reduce a tensor with affine layout over a set of ranks *INCLUDING* the contiguous
/// rank. This leads to favorable vectorized memory accesses over the contiguous rank.
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
class TensorReductionAffineContiguous {
public:
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
using ComputeFragment = Array<ElementCompute, VectorLength>;
using SourceFragment = AlignedArray<ElementSource, VectorLength>;
using OutputFragment = AlignedArray<ElementOutput, VectorLength>;
/// Shared memory allocation used for reduction within the CTA
struct SharedStorage {
Array<ElementCompute, kThreads * kVectorLength> workspace;
};
/// Parameters structure
using Params = TensorReductionAffineContiguousParams<
Rank,
ReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
VectorLength,
ElementCompute,
Threads,
BatchSize
>;
private:
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_inner_coord_and_offset_(
Params const ¶ms,
Coord<kInnerRank> & coord,
int64_t &src_offset,
uint64_t linear_idx) const {
// Decompose into a coordinate of rank <kInnerRank>
coord = CoordinateDecomposition<kInnerRank>(linear_idx, ¶ms.divmod[kRank - kInnerRank]);
    // Compute an offset using the source stride
src_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kInnerRank - 1; ++i) {
src_offset += coord[i] * params.src_stride[kReducedRank + i];
}
src_offset += coord[kInnerRank - 1] * sizeof_bits<ElementSource>::value / 8;
}
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_outer_coord_and_offset_(
Params const ¶ms,
Coord<kReducedRank> & coord,
int64_t &dst_offset,
int64_t &src_offset,
uint64_t linear_idx) const {
// Decompose into coordinate of rank <kReducedRank>
coord = CoordinateDecomposition<kReducedRank>(linear_idx, params.divmod);
// Compute offsets using destination and source strides
dst_offset = 0;
src_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kReducedRank; ++i) {
dst_offset += params.dst_stride[i] * coord[i];
src_offset += params.src_stride[i] * coord[i];
}
}
/// Reduces over the reduction indices yielding a single element
CUTLASS_DEVICE
ElementCompute reduce_indices_(
Params const ¶ms,
ElementCompute *threadblock_workspace,
char const *src_byte_ptr,
int coord_c) {
NumericArrayConverter<ElementCompute, ElementSource, VectorLength> convert_source;
ReductionOp reduction_op(params.reduction_op);
//
// Early exit or initialize to identity element
//
if (!params.inner_count) {
return params.reduction_identity;
}
ComputeFragment accumulator;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < int(accumulator.size()); ++i) {
accumulator[i] = params.reduction_identity;
}
// Compute the coordinate of the first access
int64_t src_byte_offset = 0;
Coord<kInnerRank> coord;
uint64_t linear_idx = (threadIdx.x + blockDim.x * threadIdx.z + blockDim.x * blockIdx.z * blockDim.z) * kVectorLength;
compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx);
    // Fragments used to stage a batch of loaded vectors
SourceFragment source_fragment[kBatchSize];
bool not_done = true;
// Iterate over vectors in a linearized reduction index space
while (not_done) {
bool guards[kBatchSize];
// Issue a batch of loads
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (linear_idx < params.inner_count) {
source_fragment[b] = *reinterpret_cast<SourceFragment const *>(src_byte_ptr + src_byte_offset);
guards[b] = true;
}
else {
guards[b] = false;
not_done = false;
}
linear_idx += (blockDim.z * gridDim.z * blockDim.x) * kVectorLength;
compute_inner_coord_and_offset_(params, coord, src_byte_offset, linear_idx);
}
// Perform a batch of reduction operations
CUTLASS_PRAGMA_UNROLL
for (int b = 0; b < kBatchSize; ++b) {
if (guards[b]) {
auto cvt = convert_source(source_fragment[b]);
accumulator = cutlass::reduction::thread::detail::ApplyArrayOperator(
reduction_op,
accumulator,
cvt);
}
}
    }
//
// Reduction of vectors to scalar
//
ElementCompute reduced_accumulator = accumulator[0];
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < kVectorLength; ++i) {
reduced_accumulator = reduction_op(reduced_accumulator, accumulator[i]);
}
//
// Reduction within CTA across threadIdx.xz => threadIdx{.x = 0, .z = 0}
//
// This re-arranges data so threadIdx.y is effectively a row index and threadIdx.xz is a column
//
int thread_count = blockDim.x * blockDim.z;
int thread_j = threadIdx.x + blockDim.x * threadIdx.z;
int thread_i = threadIdx.y;
ElementCompute *frag_ptr = reinterpret_cast<ElementCompute *>(threadblock_workspace) + thread_i * thread_count;
frag_ptr[thread_j] = reduced_accumulator;
//
// Reduce
//
CUTLASS_PRAGMA_NO_UNROLL
while (thread_count > 1) {
thread_count /= 2;
__syncthreads();
if (thread_j < thread_count) {
ElementCompute other = frag_ptr[thread_j + thread_count];
reduced_accumulator = reduction_op(reduced_accumulator, other);
frag_ptr[thread_j] = reduced_accumulator;
}
__syncthreads();
}
return reduced_accumulator;
}
public:
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
int coord_c = (blockIdx.x * blockDim.x + threadIdx.x) * kVectorLength;
char const * src_byte_ptr = reinterpret_cast<char const *>(params.source);
char * dst_byte_ptr = nullptr;
// If performing a reduction across CTAs, redirect output to device workspace
if (gridDim.z == 1) {
dst_byte_ptr = reinterpret_cast<char *>(params.destination);
}
else {
dst_byte_ptr = reinterpret_cast<char *>(params.device_workspace);
}
uint64_t idx_linear = blockIdx.y * blockDim.y + threadIdx.y;
// Use modulo division to compute location
Coord<kReducedRank> outer_coord;
int64_t dst_byte_offset;
int64_t src_byte_offset;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
if (gridDim.z == 1) {
/// Complete the reduction with no workspace
while (idx_linear < params.outer_count) {
ElementCompute result = reduce_indices_(
params,
shared_storage.workspace.data(),
src_byte_ptr + src_byte_offset,
coord_c);
// Store the result after possible final reduction within the CTA
if (threadIdx.z == 0 && threadIdx.x == 0) {
// Convert to output type and store
NumericConverter<ElementOutput, ElementCompute> convert_output;
ElementOutput cvt = convert_output(result);
*reinterpret_cast<ElementOutput *>(dst_byte_ptr + dst_byte_offset) = cvt;
}
__syncthreads();
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
} // while
}
else {
/// Complete the reduction with workspace
while (idx_linear < params.outer_count) {
ElementCompute result = reduce_indices_(
params,
shared_storage.workspace.data(),
src_byte_ptr + src_byte_offset,
coord_c);
int64_t byte_offset =
blockIdx.z * params.workspace_stride + idx_linear * sizeof_bits<ElementCompute>::value / 8;
// Store the result for final reduction
if (threadIdx.z == 0 && threadIdx.x == 0) {
*reinterpret_cast<ElementCompute *>(dst_byte_ptr + byte_offset) = result;
}
__syncthreads();
// Update indices and pointers
idx_linear += gridDim.y * blockDim.y;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
src_byte_offset,
idx_linear);
} // while
}
}
};
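// Launch sketch (illustrative only; the names and launch shapes below are assumptions, and the real
// heuristics live in the device-level reduction wrappers):
//
//   using Reduction = TensorReductionAffineContiguous<
//       /*Rank=*/4, /*ReducedRank=*/2, float, cutlass::half_t, cutlass::plus<float>, /*VectorLength=*/8>;
//   typename Reduction::Params params(/* extents, pointers, strides, workspace, reduction op */);
//
//   // threadIdx.x/z and blockIdx.z walk the inner (reduced) index space; threadIdx.y and blockIdx.y walk
//   // the outer index space. If launched through cutlass::Kernel<> from device_kernel.h (included above),
//   // pass sizeof(SharedStorage) as the dynamic shared memory size:
//   cutlass::Kernel<Reduction><<<grid, block, sizeof(typename Reduction::SharedStorage)>>>(params);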
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to perform final reduction
template <
int Rank, ///< Rank of source tensor (e.g. NDHWC => 5)
int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. NC => 2)
typename ElementOutput, ///< Data type of output tensor
typename ElementSource, ///< Data type of source tensor
typename ReductionOp, ///< Reduction operator
int VectorLength = 1, ///< Vector length for memory
typename ElementCompute = ElementOutput, ///< Internal compute type - input type of reduction operation
int Threads = 256, ///< Number of participating threads
int BatchSize = 4 ///< Number of elements to load per batch
>
class TensorReductionAffineContiguousFinal {
public:
static int const kRank = Rank;
static int const kReducedRank = ReducedRank;
static int const kVectorLength = VectorLength;
static int const kInnerRank = kRank - kReducedRank;
static int const kThreads = Threads;
static int const kBatchSize = BatchSize;
/// Shared memory
struct SharedStorage { };
/// Parameters structure
using Params = TensorReductionAffineContiguousParams<
Rank,
ReducedRank,
ElementOutput,
ElementSource,
ReductionOp,
VectorLength,
ElementCompute,
Threads,
BatchSize
>;
private:
/// Computes the coordinate and offset of a given linear index
CUTLASS_DEVICE
void compute_outer_coord_and_offset_(
Params const ¶ms,
Coord<kReducedRank> & coord,
int64_t &dst_offset,
uint64_t linear_idx) const {
// Decompose into coordinate of rank <kReducedRank>
coord = CoordinateDecomposition<kReducedRank>(linear_idx, params.divmod);
// Compute offsets using destination and source strides
dst_offset = 0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kReducedRank; ++i) {
dst_offset += params.dst_stride[i] * coord[i];
}
}
/// Reduces over the reduction indices
CUTLASS_DEVICE
ElementCompute reduce_indices_(
Params const ¶ms,
ElementCompute const *device_workspace) {
ReductionOp reduction_op(params.reduction_op);
char const *src_byte_ptr = reinterpret_cast<char const *>(device_workspace);
// Accumulated output
ElementCompute accumulator = params.reduction_identity;
for (int iter = 0; iter < params.workspace_count; ++iter) {
ElementCompute workspace_item = *reinterpret_cast<ElementCompute const *>(src_byte_ptr);
accumulator = reduction_op(accumulator, workspace_item);
src_byte_ptr += params.workspace_stride;
}
return accumulator;
}
public:
//
// Methods
//
/// Perform a reduction
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
uint64_t idx_linear = blockIdx.x * blockDim.x + threadIdx.x;
char * dst_byte_ptr = reinterpret_cast<char *>(params.destination);
// Use modulo division to compute location
Coord<kReducedRank> outer_coord;
int64_t dst_byte_offset;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
idx_linear);
/// Complete the reduction
while (idx_linear < params.outer_count) {
ElementCompute result = reduce_indices_(params, params.device_workspace + idx_linear);
// Convert to output type and store
NumericConverter<ElementOutput, ElementCompute> convert_output;
*reinterpret_cast<ElementOutput *>(dst_byte_ptr + dst_byte_offset) = convert_output(result);
// Update indices and pointers
idx_linear += gridDim.x * blockDim.x;
compute_outer_coord_and_offset_(
params,
outer_coord,
dst_byte_offset,
idx_linear);
}
}
};
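// Note on the two-pass path (summarizing the kernels above; the launch ordering is the caller's responsibility):
// when TensorReductionAffineContiguous is launched with gridDim.z > 1, each z-slice writes one partial result
// per output element into params.device_workspace at params.workspace_stride byte intervals. This final kernel
// is then launched with a 1-D grid over params.outer_count; it folds the params.workspace_count partials with
// the same ReductionOp and writes the converted ElementOutput values to params.destination.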
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace reduction
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/reduction/kernel/tensor_reduce_affine_contiguous.h/0 | {
"file_path": "include/cutlass/reduction/kernel/tensor_reduce_affine_contiguous.h",
"repo_id": "include",
"token_count": 7946
} | 38 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Templates for transposing operand B in shared memory for Hopper (SM90) warpgroup MMA.
*/
#pragma once
#include "cute/arch/mma_sm90_gmma.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace collective {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
using namespace cute;
template <bool Transpose, class SmemLayoutAtom, class ElementType>
constexpr auto
gmma_smem_transpose_or_passthrough() {
if constexpr (Transpose) {
if constexpr (cute::is_same_v<GMMA::Layout_MN_SW128_Atom<ElementType>, SmemLayoutAtom>) {
return GMMA::Layout_K_SW128_Atom<ElementType>{};
}
else if constexpr (cute::is_same_v<GMMA::Layout_MN_SW64_Atom<ElementType>, SmemLayoutAtom>) {
return GMMA::Layout_K_SW64_Atom<ElementType>{};
}
else if constexpr (cute::is_same_v<GMMA::Layout_MN_SW32_Atom<ElementType>, SmemLayoutAtom>) {
return GMMA::Layout_K_SW32_Atom<ElementType>{};
}
else if constexpr (cute::is_same_v<GMMA::Layout_MN_INTER_Atom<ElementType>, SmemLayoutAtom>) {
return GMMA::Layout_K_INTER_Atom<ElementType>{};
}
else {
static_assert(cutlass::detail::dependent_false<SmemLayoutAtom>, "Unsupported Layout_SW_Atom for B SMEM transposition");
}
}
else {
return SmemLayoutAtom{};
}
}
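// Illustrative use of the helper above (a sketch; the alias names are ours, not from this header):
//
//   using TransposedAtomB = decltype(gmma_smem_transpose_or_passthrough<
//       /*Transpose=*/true, GMMA::Layout_MN_SW128_Atom<ElementB>, ElementB>());
//
// With Transpose == true an MN-major swizzle atom is mapped to its K-major counterpart
// (here GMMA::Layout_K_SW128_Atom<ElementB>); with Transpose == false the atom is returned unchanged.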
template <class SmemCopyAtom, class ElementType>
constexpr auto
use_universal_transposition() {
if constexpr (sizeof(ElementType) == 1) {
return !cute::is_same_v<GMMA::Layout_MN_SW128_Atom<ElementType>, SmemCopyAtom>;
}
else if constexpr (sizeof(ElementType) == 4){
    // Only universal transposition can handle SW64 and non-swizzled (INTER) SMEM layouts
if constexpr (cute::is_same_v<GMMA::Layout_MN_SW64_Atom<ElementType>, SmemCopyAtom> ||
cute::is_same_v<GMMA::Layout_MN_INTER_Atom<ElementType>, SmemCopyAtom>) {
return true;
}
else {
return false;
}
}
else {
static_assert(cutlass::detail::dependent_false<ElementType>, "Unsupported ElementType for B SMEM transposition");
}
}
template<
class TiledMma_,
class SmemLayoutB_,
class SmemLayoutAtomB_,
class ElementB_>
class NoTranspositionOperandB {
public:
using TiledMma = TiledMma_;
using SmemLayoutB = SmemLayoutB_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using ElementB = ElementB_;
constexpr CUTLASS_HOST_DEVICE
NoTranspositionOperandB(
int,
int,
TiledMma,
SmemLayoutB,
SmemLayoutAtomB,
ElementB) { }
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void operator()(
TensorSmemB const&,
TensorTransposedSmemB const&,
int, int) { }
CUTLASS_DEVICE void synchronize(int) { }
CUTLASS_DEVICE void synchronize() { }
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void transpose(
TensorSmemB const&,
TensorTransposedSmemB const&,
int) { }
};
template<
class TiledMma_,
class SmemLayoutB_,
class SmemLayoutAtomB_,
class ElementB_>
class UniversalTranspositionOperandB {
public:
using TiledMma = TiledMma_;
using SmemLayoutB = SmemLayoutB_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using ElementB = ElementB_;
constexpr CUTLASS_HOST_DEVICE
UniversalTranspositionOperandB(
int warp_idx_,
int warp_group_thread_idx_,
TiledMma,
SmemLayoutB,
SmemLayoutAtomB,
ElementB)
: warp_idx(warp_idx_)
, warp_group_thread_idx(warp_group_thread_idx_) { }
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void operator()(
TensorSmemB const& sB,
TensorTransposedSmemB const& gmma_sB,
int read_stage, int current_step) {
if (current_step > 0) {
return;
}
constexpr int NumMathWarpGroup = CUTE_STATIC_V(size(TiledMma{})) / NumThreadsPerWarpGroup;
static_assert(NumMathWarpGroup == 1 ||
(!detail::use_universal_transposition<SmemLayoutAtomB, ElementB>() && NumMathWarpGroup == 2),
"Wrong math warp group number for TransposeB");
constexpr int WarpgroupTileSize = size<1>(SmemLayoutB{}); // A warp group tile would process entire Smem K.
constexpr int BytesPerSmemSwizzleUnit = 16;
constexpr int WarpThreadShapeN = BytesPerSmemSwizzleUnit / sizeof(ElementB);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Universal transposition, need warp_group sync between load and store.
/// The number of reg used depends on the input elementB.
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
In one copy step, a warp group would load WarpgroupTileSize * WarpgroupTileSize tile then store to transposed location.
In warp_group_tile, each warp holds Four WarpTileSize x WarpTileSize elements:
K
------------
| W0 W1 W2 W3 ---
| W0 W1 W2 W3 |
| W0 W1 W2 W3 | --> Copy Step 0
| W0 W1 W2 W3 ---
....
| W0 W1 W2 W3 ---
| W0 W1 W2 W3 |
| W0 W1 W2 W3 | --> Copy Step n
| W0 W1 W2 W3 ---
*/
static_assert((NumThreadsPerWarpGroup % WarpThreadShapeN == 0), "Unsupported warp thread layout.");
constexpr auto WarpgroupThreadLayout = make_layout(make_shape(Int<WarpThreadShapeN>{}, Int<NumThreadsPerWarpGroup / WarpThreadShapeN>{}));
// Get copy tile and partition to each thread
auto sB_tiled_copy = make_tiled_copy(
Copy_Atom<DefaultCopy, ElementB>{},
WarpgroupThreadLayout, // thr_layout
Layout<_1>{} // val_layout
);
static_assert(size(sB_tiled_copy) == size(TiledMma{}), "Wrong thread number in TiledCopy.");
auto sB_thr_copy = sB_tiled_copy.get_thread_slice(warp_group_thread_idx);
Tensor tCsB = sB_thr_copy.partition_S( sB(_,_,read_stage)); // (CPY, CPY_N, CPY_K)
Tensor tCsB_transposed = sB_thr_copy.partition_D(gmma_sB(_,_,read_stage)); // (CPY, CPY_N, CPY_K)
// Divide partitioned tile to limit register usage
constexpr int CopySteps = size<0>(SmemLayoutB{}) / WarpgroupTileSize;
constexpr auto CopyTileShape = make_shape(size<0>(tCsB), Int< size<1>(tCsB) / CopySteps >{}, size<2>(tCsB));
static_assert(size<1>(tCsB) % CopySteps == 0, "CopySteps must evenly divide rank 1 size of partitioned SMEM.");
Tensor tCsB_copy_tile = zipped_divide(tCsB, CopyTileShape);
Tensor tCsB_copy_tile_transposed = zipped_divide(tCsB_transposed, CopyTileShape);
auto transpose_fragment = make_fragment_like(tCsB_copy_tile(_,_0{}));
CUTLASS_PRAGMA_NO_UNROLL
for (int step = 0; step < CopySteps; ++step) {
copy(sB_tiled_copy, tCsB_copy_tile(_,step), transpose_fragment);
// Make sure all elements are read before being overwritten
__syncthreads();
copy(sB_tiled_copy, transpose_fragment, tCsB_copy_tile_transposed(_,step));
}
}
CUTLASS_DEVICE void synchronize(int step) {
if (step == 0) {
// SMEM fence to make sure B is transposed before math
cutlass::arch::fence_view_async_shared();
cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier);
}
}
CUTLASS_DEVICE void synchronize() {
// SMEM fence to make sure B is transposed before math
cutlass::arch::fence_view_async_shared();
cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier);
}
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void transpose(
TensorSmemB const& sB,
TensorTransposedSmemB const& gmma_sB,
int read_stage) {
this->operator()(sB, gmma_sB, read_stage, 0);
synchronize();
}
private:
const int warp_idx;
const int warp_group_thread_idx;
};
template<
class TiledMma_,
class SmemLayoutB_,
class SmemLayoutAtomB_,
class ElementB_>
class AsyncTranspositionOperandB {
public:
using TiledMma = TiledMma_;
using SmemLayoutB = SmemLayoutB_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using ElementB = ElementB_;
static constexpr int Steps = 2;
static constexpr int NumMathWarpGroup = CUTE_STATIC_V(size(TiledMma{})) / NumThreadsPerWarpGroup;
static constexpr int StepsPerWarpGroup = Steps / NumMathWarpGroup;
static_assert(NumMathWarpGroup <= 2,
"Wrong math warp group number for TransposeB");
static constexpr int WarpgroupTileSize = size<1>(SmemLayoutB{}); // A warp group tile would process entire Smem K.
static constexpr int NumWarpsPerWarpGroup = NumThreadsPerWarpGroup / NumThreadsPerWarp;
static constexpr int BytesPerSmemSwizzleUnit = 16;
static constexpr int WarpThreadShapeN = BytesPerSmemSwizzleUnit / sizeof(ElementB);
static constexpr int WarpThreadShapeK = NumThreadsPerWarp / WarpThreadShapeN;
static constexpr int NumWarpTilePerWarpgroupTile = NumWarpsPerWarpGroup * (Steps == 8 ? 2 : 1);
static constexpr int WarpTileSize = WarpgroupTileSize / NumWarpTilePerWarpgroupTile;
  static_assert(WarpTileSize >= WarpThreadShapeN && WarpTileSize >= WarpThreadShapeK, "Invalid warp thread shape.");
static constexpr int TilesPerWarp = 2; // Each Warp would process 2 warp_tiles in one step.
static constexpr int64_t WarpTileNCoordLUT = 06723763275316420;
static constexpr int64_t WarpTileKCoordLUT = 05410541064206420;
static constexpr int NumStepsEncoded = 4; // Only encoding first 4 steps into LUT.
  static constexpr int MaskPerStep = 07;    // Each step is encoded in 3 bits.
  static constexpr int NumBitsPerStep = 3;
  static constexpr int MaskPerWarp = 07777; // Each warp encodes 4 steps (12 bits).
static constexpr int NumBitsPerWarp = 12;
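  // Worked decode example (illustrative): for a warp with warp_idx_in_warp_group = 2 at step 1, the constructor
  // extracts octal digits 8..11 of each LUT and the per-step shift in operator() then selects digit 9, giving
  // warp_tile0 = {n = 3, k = 1}; since n != k, warp_tile1 swaps the coordinates to {1, 3}, matching the
  // W2 / Step 1 entry of the coordinate table documented in operator().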
// Number of warp_group_tiles
static_assert(size<0>(SmemLayoutB{}) % WarpgroupTileSize == 0,
"Copy size must evenly divide SMEM tile.");
static constexpr int WarpgroupTileNum = size<0>(SmemLayoutB{}) / WarpgroupTileSize;
static_assert(size<2>(typename TiledMma::AtomShape_MNK{}) <= WarpThreadShapeK,
"Need to be able to transpose first k-block in the first step");
constexpr CUTLASS_HOST_DEVICE
AsyncTranspositionOperandB(
int warp_idx_,
int warp_group_thread_idx_,
TiledMma,
SmemLayoutB,
SmemLayoutAtomB,
ElementB)
: warp_idx(warp_idx_)
, warp_group_thread_idx(warp_group_thread_idx_)
, warp_idx_in_warp_group(warp_idx_ % NumWarpsPerWarpGroup)
, current_warp_tile_n_coord_LUT((WarpTileNCoordLUT >> ((warp_idx_
% NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp)
, current_warp_tile_k_coord_LUT((WarpTileKCoordLUT >> ((warp_idx_
% NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp) { }
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void operator()(
TensorSmemB const& sB,
TensorTransposedSmemB const& gmma_sB,
int read_stage, int current_step)
{
if (current_step >= StepsPerWarpGroup) {
return;
}
static constexpr auto WarpThreadLayout = make_layout(make_shape(Int<WarpThreadShapeN>{}, Int<WarpThreadShapeK>{}));
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// A warp group uses 2 steps to transpose the whole WarpgroupTileSize x WarpgroupTileSize.
/// In each step, one warp would hold two warp_tiles.
/// Step 0: Step 1:
/// W0 W1 W2 W3 -- -- -- --
/// W1 W0 -- -- -- -- W3 W2
/// W2 -- -- -- -- W3 W0 W1
/// W3 -- -- -- -- W2 W1 W0
///
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Fully static coord LUT to avoid extra register use.
/// [warp_id][step][warp_tile][n / k]
/// Step 0 Step 1 Step 2 Step 3 Step 4 Step 5 Step 6 Step 7
/// {{{0,0}, {1,1}}, {{2,2}, {3,3}}, {{4,4}, {5,5}}, {{6,6}, {7,7}}, {{4,0}, {0,4}}, {{4,1}, {1,4}}, {{4,2}, {2,4}}, {{4,3}, {3,4}}}, // W0
/// {{{1,0}, {0,1}}, {{3,2}, {2,3}}, {{5,4}, {4,5}}, {{7,6}, {6,7}}, {{5,0}, {0,5}}, {{5,1}, {1,5}}, {{5,2}, {2,5}}, {{5,3}, {3,5}}}, // W1
/// {{{2,0}, {0,2}}, {{3,1}, {1,3}}, {{6,4}, {4,6}}, {{7,5}, {5,7}}, {{6,0}, {0,6}}, {{6,1}, {1,6}}, {{6,2}, {2,6}}, {{6,3}, {3,6}}}, // W2
/// {{{3,0}, {0,3}}, {{2,1}, {1,2}}, {{7,4}, {4,7}}, {{6,5}, {5,6}}, {{7,0}, {0,7}}, {{7,1}, {1,7}}, {{7,2}, {2,7}}, {{7,3}, {3,7}}}, // W3
///
/// Encoding the coord of warp tile0 into two int64_t values.
/// Only encoding Step 0 ~ Step 4, since Step 5 ~ Step 7 have a straightforward pattern.
/// Only encoding warp tile0, since the coords of warp tile1 could be easily deduced from warp tile0.
/// The 2-step transposition and the 8-step transposition share the same encoding.
///
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Divide entire SMEM to multiple warp_tiles
constexpr auto WarpTileShape = make_shape(Int<WarpTileSize>(), Int<WarpTileSize>());
Tensor s_tile = zipped_divide( sB(_,_,read_stage), WarpTileShape);
Tensor s_tile_transposed = zipped_divide(gmma_sB(_,_,read_stage), WarpTileShape);
// Get copy tile
auto sB_tiled_copy = make_tiled_copy(
Copy_Atom<DefaultCopy, ElementB>{},
WarpThreadLayout, // thr_layout
Layout<_1>{} // val_layout
);
static_assert(size(sB_tiled_copy) * NumWarpsPerWarpGroup == size(TiledMma{}) / NumMathWarpGroup, "Wrong thread number in TiledCopy.");
auto sB_thr_copy = sB_tiled_copy.get_thread_slice(warp_group_thread_idx % NumThreadsPerWarp); // slice based on lane_idx
// Construct fragments for transposition
Tensor tmp_tCsB = sB_thr_copy.partition_S(flatten(s_tile(_, make_coord(_0{}, _0{}))));
decltype(make_fragment_like(tmp_tCsB)) transpose_fragments[TilesPerWarp] = {
make_fragment_like(tmp_tCsB),
make_fragment_like(tmp_tCsB)
};
[[maybe_unused]] int step = current_step * NumMathWarpGroup;
if constexpr (NumMathWarpGroup == 2) {
      // With 2 math warp groups, warps 4~7 form the 1st math warp group and the next 4 warps the 2nd;
      // dividing warp_idx by 8 determines whether this warp belongs to the 2nd warp group.
step += warp_idx / (NumWarpsPerWarpGroup * 2);
}
int tmp_warp_tile_n_coord_LUT = current_warp_tile_n_coord_LUT >> (NumBitsPerStep * current_step);
int tmp_warp_tile_k_coord_LUT = current_warp_tile_k_coord_LUT >> (NumBitsPerStep * current_step);
if constexpr (NumMathWarpGroup == 2) {
tmp_warp_tile_n_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2));
tmp_warp_tile_k_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2));
}
// decoding the warp tile coord.
int warp_tile0_n, warp_tile0_k;
if constexpr (StepsPerWarpGroup <= NumStepsEncoded) {
warp_tile0_n = tmp_warp_tile_n_coord_LUT & MaskPerStep;
warp_tile0_k = tmp_warp_tile_k_coord_LUT & MaskPerStep;
} else {
warp_tile0_n = step < NumStepsEncoded ? (tmp_warp_tile_n_coord_LUT & MaskPerStep) : 4 + warp_idx_in_warp_group;
warp_tile0_k = step < NumStepsEncoded ? (tmp_warp_tile_k_coord_LUT & MaskPerStep) : step - 4;
}
int warp_tile1_n = warp_tile0_n == warp_tile0_k ? warp_tile0_n + 1 : warp_tile0_k;
int warp_tile1_k = warp_tile0_n == warp_tile0_k ? warp_tile0_k + 1 : warp_tile0_n;
CUTLASS_PRAGMA_UNROLL
for (int warp_group_tile = 0; warp_group_tile < WarpgroupTileNum; ++warp_group_tile) {
static_assert(TilesPerWarp == 2);
// [warp_tile][n/k]
const int warp_tile_coord[TilesPerWarp][2] = {
// n k
{warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile0_n, warp_tile0_k}, // warp_tile 0
{warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile1_n, warp_tile1_k} // warp_tile 1
};
CUTLASS_PRAGMA_UNROLL
for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) {
Tensor tCsB = sB_thr_copy.partition_S(
flatten(s_tile(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1])))
); // (CPY, CPY_N, CPY_K)
copy(sB_tiled_copy, tCsB, transpose_fragments[warp_tile]);
}
// Make sure elements in two 8x8 warp tiles are all consumed
__syncwarp();
CUTLASS_PRAGMA_UNROLL
for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) {
Tensor tCsB_transposed = sB_thr_copy.partition_D(
flatten(s_tile_transposed(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1])))
); // (CPY, CPY_N, CPY_K)
copy(sB_tiled_copy, transpose_fragments[warp_tile], tCsB_transposed);
}
} // loop warp_group_tile
}
CUTLASS_DEVICE void synchronize(int step) {
if (step < StepsPerWarpGroup) {
// SMEM fence to make sure B is transposed before math
cutlass::arch::fence_view_async_shared();
cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier);
}
}
CUTLASS_DEVICE void synchronize() {
cutlass::arch::fence_view_async_shared();
cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier);
}
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void transpose(
TensorSmemB const& sB,
TensorTransposedSmemB const& gmma_sB,
int read_stage) {
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < StepsPerWarpGroup; ++i) {
this->operator()(sB, gmma_sB, read_stage, i);
}
synchronize();
}
private:
const int warp_idx;
const int warp_group_thread_idx;
const int warp_idx_in_warp_group;
const int current_warp_tile_n_coord_LUT;
const int current_warp_tile_k_coord_LUT;
};
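// Usage note (illustrative; the actual call sites are in the collective mainloops, not in this header):
// a mainloop constructs one of these functors with its warp index and warp-group thread index, then either calls
//
//   transpose_b.transpose(sB, gmma_sB, read_stage);        // one-shot: all steps plus the final synchronize()
//
// or interleaves transpose_b(sB, gmma_sB, read_stage, step) with other work and calls
// transpose_b.synchronize(step) before the WGMMA that consumes the transposed tile.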
template<
class TiledMma_,
class SmemLayoutB_,
class SmemLayoutAtomB_,
class ElementB_>
class AsyncTranspositionOperandB_1BElementB {
public:
static_assert(sizeof(ElementB_) == 1);
using TiledMma = TiledMma_;
using SmemLayoutB = SmemLayoutB_;
using SmemLayoutAtomB = SmemLayoutAtomB_;
using ElementB = ElementB_;
static constexpr int Steps = 8;
static constexpr int NumMathWarpGroup = CUTE_STATIC_V(size(TiledMma{})) / NumThreadsPerWarpGroup;
static constexpr int StepsPerWarpGroup = Steps / NumMathWarpGroup;
static_assert(NumMathWarpGroup <= 2,
"Wrong math warp group number for TransposeB");
static constexpr int WarpgroupTileSize = size<1>(SmemLayoutB{}); // A warp group tile would process entire Smem K.
static constexpr int NumWarpsPerWarpGroup = NumThreadsPerWarpGroup / NumThreadsPerWarp;
static constexpr int BytesPerSmemSwizzleUnit = 16;
static constexpr int WarpThreadShapeN = BytesPerSmemSwizzleUnit / sizeof(ElementB);
static constexpr int WarpThreadShapeK = NumThreadsPerWarp / WarpThreadShapeN;
static constexpr int NumWarpTilePerWarpgroupTile = NumWarpsPerWarpGroup * (Steps == 8 ? 2 : 1);
static constexpr int WarpTileSize = WarpgroupTileSize / NumWarpTilePerWarpgroupTile;
  static_assert(WarpTileSize >= WarpThreadShapeN && WarpTileSize >= WarpThreadShapeK, "Invalid warp thread shape.");
static constexpr int TilesPerWarp = 2; // Each Warp would process 2 warp_tiles in one step.
static constexpr int64_t WarpTileNCoordLUT = 06723763275316420;
static constexpr int64_t WarpTileKCoordLUT = 05410541064206420;
static constexpr int NumStepsEncoded = 4; // Only encoding first 4 steps into LUT.
  static constexpr int MaskPerStep = 07;    // Each step is encoded in 3 bits.
  static constexpr int NumBitsPerStep = 3;
  static constexpr int MaskPerWarp = 07777; // Each warp encodes 4 steps (12 bits).
static constexpr int NumBitsPerWarp = 12;
// Number of warp_group_tiles
static_assert(size<0>(SmemLayoutB{}) % WarpgroupTileSize == 0,
"Copy size must evenly divide SMEM tile.");
static constexpr int WarpgroupTileNum = size<0>(SmemLayoutB{}) / WarpgroupTileSize;
constexpr CUTLASS_HOST_DEVICE
AsyncTranspositionOperandB_1BElementB(
int warp_idx_,
int warp_group_thread_idx_,
TiledMma,
SmemLayoutB,
SmemLayoutAtomB,
ElementB)
: warp_idx(warp_idx_)
, warp_group_thread_idx(warp_group_thread_idx_)
, warp_idx_in_warp_group(warp_idx_ % NumWarpsPerWarpGroup)
, current_warp_tile_n_coord_LUT((WarpTileNCoordLUT >> ((warp_idx_
% NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp)
, current_warp_tile_k_coord_LUT((WarpTileKCoordLUT >> ((warp_idx_
% NumWarpsPerWarpGroup) * NumBitsPerWarp)) & MaskPerWarp) { }
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void operator()(
TensorSmemB const& sB,
TensorTransposedSmemB const& gmma_sB,
int read_stage, int current_step)
{
if (current_step > 0) {
return;
}
constexpr auto WarpThreadLayout = make_layout(make_shape(Int<WarpThreadShapeN>{}, Int<WarpThreadShapeK>{}));
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// A warp group uses 8 steps to transpose the whole WarpgroupTileSize x WarpgroupTileSize.
/// Divide a warp_group_tile into 8x8 warp_tiles to further reduce the reg usage.
/// Step 0: Step 1: Step 2: Step 3:
/// W0 W1 W2 W3 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
/// W1 W0 -- -- -- -- -- -- -- -- W3 W2 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
/// W2 -- -- -- -- -- -- -- -- W3 W0 W1 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
/// W3 -- -- -- -- -- -- -- -- W2 W1 W0 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
/// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- --
/// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W1 W0 -- -- -- -- -- -- -- -- W3 W2
/// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W2 -- -- -- -- -- -- -- -- W3 W0 W1
/// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W3 -- -- -- -- -- -- -- -- W2 W1 W0
///
/// Step 4: Step 5: Step 6: Step 7:
/// -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
/// -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
/// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3 -- -- -- -- -- -- -- --
/// -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- W0 W1 W2 W3
/// W0 -- -- -- -- -- -- -- -- W0 -- -- -- -- -- -- -- -- W0 -- -- -- -- -- -- -- -- W0 -- -- -- --
/// W1 -- -- -- -- -- -- -- -- W1 -- -- -- -- -- -- -- -- W1 -- -- -- -- -- -- -- -- W1 -- -- -- --
/// W2 -- -- -- -- -- -- -- -- W2 -- -- -- -- -- -- -- -- W2 -- -- -- -- -- -- -- -- W2 -- -- -- --
/// W3 -- -- -- -- -- -- -- -- W3 -- -- -- -- -- -- -- -- W3 -- -- -- -- -- -- -- -- W3 -- -- -- --
///
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Fully static coord LUT to avoid extra register use.
/// [warp_id][step][warp_tile][n / k]
/// Step 0 Step 1 Step 2 Step 3 Step 4 Step 5 Step 6 Step 7
/// {{{0,0}, {1,1}}, {{2,2}, {3,3}}, {{4,4}, {5,5}}, {{6,6}, {7,7}}, {{4,0}, {0,4}}, {{4,1}, {1,4}}, {{4,2}, {2,4}}, {{4,3}, {3,4}}}, // W0
/// {{{1,0}, {0,1}}, {{3,2}, {2,3}}, {{5,4}, {4,5}}, {{7,6}, {6,7}}, {{5,0}, {0,5}}, {{5,1}, {1,5}}, {{5,2}, {2,5}}, {{5,3}, {3,5}}}, // W1
/// {{{2,0}, {0,2}}, {{3,1}, {1,3}}, {{6,4}, {4,6}}, {{7,5}, {5,7}}, {{6,0}, {0,6}}, {{6,1}, {1,6}}, {{6,2}, {2,6}}, {{6,3}, {3,6}}}, // W2
/// {{{3,0}, {0,3}}, {{2,1}, {1,2}}, {{7,4}, {4,7}}, {{6,5}, {5,6}}, {{7,0}, {0,7}}, {{7,1}, {1,7}}, {{7,2}, {2,7}}, {{7,3}, {3,7}}}, // W3
///
/// Encoding the coord of warp tile0 into two int64_t values.
/// Only encoding Step 0 ~ Step 4, since Step 5 ~ Step 7 have a straightforward pattern.
/// Only encoding warp tile0, since the coords of warp tile1 could be easily deduced from warp tile0.
/// The 2-step transposition and the 8-step transposition share the same encoding.
///
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
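    // Worked example (illustrative, derived from the table and LUTs above): for warp W2 at
    // step 1, the per-warp LUT slice is (LUT >> (2 * NumBitsPerWarp)) and its step-1 field is
    // ((slice >> (1 * NumBitsPerStep)) & MaskPerStep), i.e.
    //   n = (WarpTileNCoordLUT >> (2*12 + 1*3)) & 07 = 3
    //   k = (WarpTileKCoordLUT >> (2*12 + 1*3)) & 07 = 1
    // so warp_tile0 = {3,1}; since n != k, warp_tile1 is the mirrored tile {1,3},
    // matching row W2, Step 1 of the coordinate table above.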
// Divide entire SMEM to multiple warp_tiles
constexpr auto WarpTileShape = make_shape(Int<WarpTileSize>(), Int<WarpTileSize>());
Tensor s_tile = zipped_divide( sB(_,_,read_stage), WarpTileShape);
Tensor s_tile_transposed = zipped_divide(gmma_sB(_,_,read_stage), WarpTileShape);
// Get copy tile
auto sB_tiled_copy = make_tiled_copy(
Copy_Atom<DefaultCopy, ElementB>{},
WarpThreadLayout, // thr_layout
Layout<_1>{} // val_layout
);
static_assert(size(sB_tiled_copy) * NumWarpsPerWarpGroup == size(TiledMma{}) / NumMathWarpGroup, "Wrong thread number in TiledCopy.");
auto sB_thr_copy = sB_tiled_copy.get_thread_slice(warp_group_thread_idx % NumThreadsPerWarp); // slice based on lane_idx
// Construct fragments for transposition
Tensor tmp_tCsB = sB_thr_copy.partition_S(flatten(s_tile(_, make_coord(_0{}, _0{}))));
decltype(make_fragment_like(tmp_tCsB)) transpose_fragments[TilesPerWarp] = {
make_fragment_like(tmp_tCsB),
make_fragment_like(tmp_tCsB)
};
CUTLASS_PRAGMA_NO_UNROLL
for (int warp_group_tile = 0; warp_group_tile < WarpgroupTileNum; ++warp_group_tile) {
int tmp_warp_tile_n_coord_LUT = current_warp_tile_n_coord_LUT;
int tmp_warp_tile_k_coord_LUT = current_warp_tile_k_coord_LUT;
constexpr int StepsPerWarpGroup = Steps / NumMathWarpGroup;
if constexpr (NumMathWarpGroup == 2) {
tmp_warp_tile_n_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2));
tmp_warp_tile_k_coord_LUT >>= NumBitsPerStep * (warp_idx / (NumWarpsPerWarpGroup * 2));
}
CUTLASS_PRAGMA_NO_UNROLL
for (int step_per_warp_group = 0; step_per_warp_group < StepsPerWarpGroup; ++step_per_warp_group) {
      // With 2 math warp groups, warp indices 4~7 belong to the 1st warp group and 8~11 to the 2nd,
      // so warp_idx / (NumWarpsPerWarpGroup * 2) selects this warp group's step offset.
int step = step_per_warp_group * NumMathWarpGroup + warp_idx / (NumWarpsPerWarpGroup * 2);
// decoding the warp tile coord.
int warp_tile0_n = step < NumStepsEncoded ? (tmp_warp_tile_n_coord_LUT & MaskPerStep) : 4 + warp_idx_in_warp_group;
int warp_tile0_k = step < NumStepsEncoded ? (tmp_warp_tile_k_coord_LUT & MaskPerStep) : step - 4;
int warp_tile1_n = warp_tile0_n == warp_tile0_k ? warp_tile0_n + 1 : warp_tile0_k;
int warp_tile1_k = warp_tile0_n == warp_tile0_k ? warp_tile0_k + 1 : warp_tile0_n;
tmp_warp_tile_n_coord_LUT >>= NumBitsPerStep;
tmp_warp_tile_k_coord_LUT >>= NumBitsPerStep;
static_assert(TilesPerWarp == 2);
// [warp_tile][n/k]
const int warp_tile_coord[TilesPerWarp][2] = {
// n k
{warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile0_n, warp_tile0_k}, // warp_tile 0
{warp_group_tile * NumWarpTilePerWarpgroupTile + warp_tile1_n, warp_tile1_k} // warp_tile 1
};
CUTLASS_PRAGMA_UNROLL
for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) {
Tensor tCsB = sB_thr_copy.partition_S(
flatten(s_tile(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1])))
); // (CPY, CPY_N, CPY_K)
copy(sB_tiled_copy, tCsB, transpose_fragments[warp_tile]);
}
// Make sure elements in two 8x8 warp tiles are all consumed
__syncwarp();
CUTLASS_PRAGMA_UNROLL
for (int warp_tile = 0; warp_tile < TilesPerWarp; ++warp_tile) {
Tensor tCsB_transposed = sB_thr_copy.partition_D(
flatten(s_tile_transposed(_, make_coord(warp_tile_coord[warp_tile][0], warp_tile_coord[warp_tile][1])))
); // (CPY, CPY_N, CPY_K)
copy(sB_tiled_copy, transpose_fragments[warp_tile], tCsB_transposed);
}
} // lock step
} // loop warp_group_tile
}
CUTLASS_DEVICE void synchronize(int step) {
if (step == 0) {
// SMEM fence to make sure B is transposed before math
cutlass::arch::fence_view_async_shared();
cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier);
}
}
CUTLASS_DEVICE void synchronize() {
cutlass::arch::fence_view_async_shared();
cutlass::arch::NamedBarrier::sync(size(TiledMma{}), cutlass::arch::ReservedNamedBarriers::TransposeBarrier);
}
template <
class TensorSmemB,
class TensorTransposedSmemB>
CUTLASS_DEVICE void transpose(
TensorSmemB const& sB,
TensorTransposedSmemB const& gmma_sB,
int read_stage) {
this->operator()(sB, gmma_sB, read_stage, 0);
synchronize();
}
private:
const int warp_idx;
const int warp_group_thread_idx;
const int warp_idx_in_warp_group;
const int current_warp_tile_n_coord_LUT;
const int current_warp_tile_k_coord_LUT;
};
template<
class TiledMma,
class SmemLayoutB,
class SmemLayoutAtomB,
class ElementB,
bool TransposeB
>
constexpr CUTLASS_HOST_DEVICE
auto
make_transpose_operand_b(
int warp_idx,
int warp_group_thread_idx,
TiledMma,
SmemLayoutB,
SmemLayoutAtomB,
ElementB,
cute::bool_constant<TransposeB>)
{
if constexpr (!TransposeB) {
return NoTranspositionOperandB(
warp_idx, warp_group_thread_idx, TiledMma{},
SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{});
}
else if constexpr (use_universal_transposition<SmemLayoutAtomB, ElementB>()) {
return UniversalTranspositionOperandB(
warp_idx, warp_group_thread_idx, TiledMma{},
SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{});
}
else if constexpr (sizeof(ElementB) == 1) {
return AsyncTranspositionOperandB_1BElementB(
warp_idx, warp_group_thread_idx, TiledMma{},
SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{});
}
else {
return AsyncTranspositionOperandB(
warp_idx, warp_group_thread_idx, TiledMma{},
SmemLayoutB{}, SmemLayoutAtomB{}, ElementB{});
}
}
}; // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace collective
} // namespace transform
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/collective/sm90_wgmma_transpose.hpp/0 | {
"file_path": "include/cutlass/transform/collective/sm90_wgmma_transpose.hpp",
"repo_id": "include",
"token_count": 14474
} | 39 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing loading of tiles from pitch-linear rank=2 tensors.
This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile
first, with the objective of minimizing predicate mask updates during steady-state operation.
A precomputed "Params" object minimizes the amount of state that must be stored in registers,
and integer addition is used to advance the pointer through memory.
*/
#pragma once
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h"
#include "cutlass/transform/thread/transpose.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace transform {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// PredicatedTileIterator2dThreadTile
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
/// Regular tile iterator using a precomputed control structure to minimize register liveness
/// and integer arithmetic.
///
/// Layout is assumed to be invariant at the time the precomputed "Params" object is constructed.
///
/// Base pointer and tensor extents may be specified at the time the iterator is constructed.
/// Subsequently, they are assumed to be immutable.
///
/// Adding a logical coordinate offset may be performed at the time the iterator is constructed.
/// Subsequent additions to logical coordinate offset may be performed but are relatively expensive.
///
/// Visitation order is intended to first visit a "residual" tile that may be partially full in
/// both the advance dimension and the steady-state dimension. This is assumed to be the last
/// tile in the iteration sequence. Advancing an iterator that has just been constructed moves to
/// the first tile that is full in the advance dimension and recomputes predicates. Subsequent
/// accesses may be performed without updating internal predicates and are efficient in terms of
/// live register state and pointer arithmetic instructions.
///
/// To be efficient, this assumes the iterator will be dereferenced and advanced at least once
/// outside any looping structure to minimize integer arithmetic.
///
/// Accesses out of bounds are safe so long as `clear_mask()` is called prior to dereferencing
/// the iterator.
///
///
/// Example:
///
/// An efficient pipeline structure may be constructed as follows:
///
// template <typename Iterator>
// __global__ void kernel(
// typename Iterator::Params params,
// typename Iterator::Element *ptr,
// TensorCoord extent) {
//
// typename Iterator::Fragment fragment;
//
// TensorCoord threadblock_offset(0, 0);
//
//     Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offset);
//
//
// fragment = *iter; // load "residue" tile first
// ++iter; // advance to first "steady state" tile and update internal masks
//
//
// #pragma unroll
// for (int i = Remaining - 1; i >= 0; --i) {
//
// f(fragment);
//
// if (!i) {
// iter.clear_mask(); // light-weight operation to clear masks - subsequent loads become NO-OPs.
// }
//
// fragment = *iter; // load tile during "steady state" phase
// ++iter; // advance to next tile - lightweight due to steady-state masks
// }
// }
//
// void host(TensorView<Element, 2, layout::PitchLinear> view) {
//
// using Iterator = transform::threadblock::PredicatedTileIterator2dThreadTile;
//
// typename Iterator::Params params(view.layout());
//
//     kernel<Iterator>(params, view.data(), view.extent());
// }
///
///
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
bool Transpose = false
>
class PredicatedTileIterator2dThreadTile;
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator2dThreadTile for pitch-linear data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, bool Transpose_>
class PredicatedTileIterator2dThreadTile<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, Transpose_> {
public:
static_assert(
AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::PitchLinear;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
/// Type used for internal memory accesses
  /// extra set of parentheses is needed for VS compiler
struct alignas((ThreadMap::kElementsPerAccess * sizeof_bits<Element>::value /
8)) AccessType {
Array<Element, ThreadMap::kElementsPerAccess> storage;
static int const kElements = ThreadMap::kElementsPerAccess;
};
  /// Optionally this fragment can be 4x4 transposed
using Transform = thread::Transpose< ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount , layout::PitchLinearShape<4,4>, Element>;
static bool const transpose = Transpose_;
/// Underlying iterator to compute the addresses
using TileAccessIterator =
PredicatedTileAccessIterator2dThreadTile<Shape, Element, Layout, kAdvanceRank,
ThreadMap, AccessType>;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount *
ThreadMap::ThreadAccessShape::kCount>;
/// Predicate vector stores mask to guard accesses
using Mask = typename TileAccessIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
public:
using Base = typename TileAccessIterator::Params::Base;
friend PredicatedTileIterator2dThreadTile;
private:
/// Parameters object
typename TileAccessIterator::Params params_;
public:
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout) : params_(layout) { }
CUTLASS_HOST_DEVICE
Params() { }
CUTLASS_HOST_DEVICE
Params(Base const &base)
: params_(base) {}
};
private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char *;
private:
//
// Data members
//
/// Data member to the tile access iterator
TileAccessIterator address_iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
/// Precomputed parameters object
Params const ¶ms,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const &threadblock_offset,
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
)
: address_iterator_(params.params_, pointer, extent, thread_id,
threadblock_offset) {}
/// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id,
make_Coord(0, 0)) {}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
address_iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile &operator++() {
if (kAdvanceRank)
address_iterator_.add_tile_offset({0, 1});
else
address_iterator_.add_tile_offset({1, 0});
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile operator++(int) {
PredicatedTileIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { address_iterator_.enable_mask(); }
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) { address_iterator_.set_mask(mask); }
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) { address_iterator_.get_mask(mask); }
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++){
int access_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + \
s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided;
address_iterator_.set_iteration_index(access_idx);
if (address_iterator_.valid()) {
frag_ptr[access_idx] =
*(address_iterator_.get() + pointer_offset);
}
++address_iterator_;
}
}
}
if (transpose) {
Transform t;
t.transform(frag, frag);
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) { load_with_pointer_offset(frag, 0); }
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++){
int access_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + \
s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided;
address_iterator_.set_iteration_index(access_idx);
if (address_iterator_.valid()) {
*(address_iterator_.get() + pointer_offset) = frag_ptr[access_idx];
}
++address_iterator_;
}
}
}
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); }
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator2dThreadTile for column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
bool Transpose_
>
class PredicatedTileIterator2dThreadTile<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, Transpose_> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static bool const Transpose = Transpose_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
Transpose
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator2dThreadTile;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) {}
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset, ///< Initial offset of threadblock
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())
) { }
/// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile operator++(int) {
PredicatedTileIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileIterator2dThreadTile for row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <
typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
bool Transpose_
>
class PredicatedTileIterator2dThreadTile<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, Transpose_> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may along advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");
using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
static bool const Transpose = Transpose_;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;
using Pointer = Element *;
using NonConstPointer = typename platform::remove_const<Element>::type *;
using UnderlyingIterator = PredicatedTileIterator2dThreadTile<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
Transpose
>;
using AccessType = typename UnderlyingIterator::AccessType;
/// Fragment object to be loaded or stored
using Fragment = cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kCount>;
/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;
/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileIterator2dThreadTile;
/// Parameters object
typename UnderlyingIterator::Params params_;
public:
CUTLASS_HOST_DEVICE
Params() { }
/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const &layout): params_(layout::PitchLinear(layout.stride(0))) { }
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const &base)
: params_(base) {}
};
private:
//
// Data members
//
/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;
public:
/// Constructs a TileIterator from its precomputed state, threadblock offset, and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id, ///< ID of each participating thread
TensorCoord const &threadblock_offset, ///< Initial offset of threadblock
int const *indices = nullptr ///< gather/scatter indices, note no support for gather/scatter at this specialization
):
iterator_(
params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())
) { }
/// Construct a PredicatedTileIterator2dThreadTile with zero threadblock offset
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile(
Params const ¶ms, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
): PredicatedTileIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) { }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
iterator_.add_pointer_offset(pointer_offset);
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile &operator++() {
++iterator_;
return *this;
}
/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the iterator's
/// internal pointer is reverted to the first "steady state" tile. Subsequent calls
/// are lightweight and must only update the internal pointer.
CUTLASS_HOST_DEVICE
PredicatedTileIterator2dThreadTile operator++(int) {
PredicatedTileIterator2dThreadTile self(*this);
operator++();
return self;
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) {
iterator_.clear_mask(enable);
}
/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() {
iterator_.enable_mask();
}
/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const &mask) {
iterator_.set_mask(mask);
}
/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask &mask) {
iterator_.get_mask(mask);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_pointer_offset(Fragment &frag, Index pointer_offset) {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment &frag) {
load_with_pointer_offset(frag, 0);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) {
iterator_.store_with_pointer_offset(frag, pointer_offset);
}
/// Store a fragment to memory
CUTLASS_DEVICE
void store(Fragment const &frag) {
store_with_pointer_offset(frag, 0);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace transform
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| include/cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h/0 | {
"file_path": "include/cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h",
"repo_id": "include",
"token_count": 9013
} | 40 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines an unsigned 128b integer with several operators to support 64-bit integer division.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cstdint>
#else
#include <cstdint>
#include <cstdlib>
#include <cmath>
#include <type_traits>
#include <stdexcept>
#endif
#include "cutlass/cutlass.h"
/// Optionally enable GCC's built-in type
#if (defined(__x86_64) || defined (__aarch64__)) && !(defined(__CUDA_ARCH__) && ((__CUDACC_VER_MAJOR__ <= 10) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ <= 4)))) && defined(__GNUC__)
#define CUTLASS_UINT128_NATIVE
#elif defined(_MSC_VER) && defined(_M_AMD64) && !(defined(__CUDA_ARCH__) && ((__CUDACC_VER_MAJOR__ <= 10) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ <= 4))))
#define CUTLASS_INT128_ARITHMETIC
#include <intrin.h>
#if _MSC_VER >= 1920 && !defined(__CUDA_ARCH__)
#define CUTLASS_INT128_ARITHMETIC_DIV
#include <immintrin.h>
#endif
#endif
namespace cutlass {
///! Unsigned 128b integer type
struct alignas(16) uint128_t
{
/// Size of one part of the uint's storage in bits
static constexpr int storage_bits_ = 64;
struct hilo
{
uint64_t lo;
uint64_t hi;
};
// Use a union to store either low and high parts or, if present, a built-in 128b integer type.
union {
struct hilo hilo_;
#if defined(CUTLASS_UINT128_NATIVE)
unsigned __int128 native;
#endif // defined(CUTLASS_UINT128_NATIVE)
};
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
uint128_t() : hilo_{0, 0} {}
/// Constructor from uint64
CUTLASS_HOST_DEVICE
uint128_t(uint64_t lo_) : hilo_{lo_, 0} {}
/// Constructor from two 64b unsigned integers
CUTLASS_HOST_DEVICE
uint128_t(uint64_t lo_, uint64_t hi_) : hilo_{lo_, hi_} {}
/// Optional constructor from native value
#if defined(CUTLASS_UINT128_NATIVE)
uint128_t(unsigned __int128 value) : native(value) { }
#endif
/// Lossily cast to uint64
CUTLASS_HOST_DEVICE
explicit operator uint64_t() const
{
return hilo_.lo;
}
CUTLASS_HOST_DEVICE
static void exception()
{
#if defined(__CUDA_ARCH__)
asm volatile (" brkpt;\n");
#else
// throw std::runtime_error("Not yet implemented.");
abort();
#endif
}
/// Add
CUTLASS_HOST_DEVICE
uint128_t operator+(uint128_t const& rhs) const
{
uint128_t y{};
#if defined(CUTLASS_UINT128_NATIVE)
y.native = native + rhs.native;
#else
y.hilo_.lo = hilo_.lo + rhs.hilo_.lo;
y.hilo_.hi = hilo_.hi + rhs.hilo_.hi + (y.hilo_.lo < hilo_.lo);
#endif
return y;
}
/// Subtract
CUTLASS_HOST_DEVICE
uint128_t operator-(uint128_t const& rhs) const
{
uint128_t y{};
#if defined(CUTLASS_UINT128_NATIVE)
y.native = native - rhs.native;
#else
y.hilo_.lo = hilo_.lo - rhs.hilo_.lo;
y.hilo_.hi = hilo_.hi - rhs.hilo_.hi - (rhs.hilo_.lo && y.hilo_.lo > hilo_.lo);
#endif
return y;
}
/// Multiply by unsigned 64b integer yielding 128b integer
CUTLASS_HOST_DEVICE
uint128_t operator*(uint64_t const& rhs) const
{
uint128_t y{};
#if defined(CUTLASS_UINT128_NATIVE)
y.native = native * rhs;
#elif defined(CUTLASS_INT128_ARITHMETIC)
// Multiply by the low part
y.hilo_.lo = _umul128(hilo_.lo, rhs, &y.hilo_.hi);
// Add the high part and ignore the overflow
uint64_t overflow{0};
y.hilo_.hi += _umul128(hilo_.hi, rhs, &overflow);
#else
CUTLASS_UNUSED(rhs);
exception();
#endif
return y;
}
  /// Divide a 128b value by a 64b divisor, yielding a 64b quotient
CUTLASS_HOST_DEVICE
uint64_t operator/(uint64_t const& divisor) const
{
uint64_t quotient{0};
#if defined(CUTLASS_UINT128_NATIVE)
quotient = uint64_t(native / divisor);
#elif defined(CUTLASS_INT128_ARITHMETIC_DIV)
// implemented using MSVC's arithmetic intrinsics
uint64_t remainder{0};
quotient = _udiv128(hilo_.hi, hilo_.lo, divisor, &remainder);
#else
CUTLASS_UNUSED(divisor);
exception();
#endif
return quotient;
}
  /// Divide a 128b value by a 64b divisor, yielding a 64b remainder
CUTLASS_HOST_DEVICE
uint64_t operator%(uint64_t const& divisor) const
{
uint64_t remainder{0};
#if defined(CUTLASS_UINT128_NATIVE)
remainder = uint64_t(native % divisor);
#elif defined(CUTLASS_INT128_ARITHMETIC_DIV)
// implemented using MSVC's arithmetic intrinsics
(void)_udiv128(hilo_.hi, hilo_.lo, divisor, &remainder);
#else
CUTLASS_UNUSED(divisor);
exception();
#endif
return remainder;
}
/// Computes the quotient and remainder in a single method.
CUTLASS_HOST_DEVICE
uint64_t divmod(uint64_t &remainder, uint64_t divisor) const
{
uint64_t quotient{0};
#if defined(CUTLASS_UINT128_NATIVE)
quotient = uint64_t(native / divisor);
remainder = uint64_t(native % divisor);
#elif defined(CUTLASS_INT128_ARITHMETIC_DIV)
// implemented using MSVC's arithmetic intrinsics
quotient = _udiv128(hilo_.hi, hilo_.lo, divisor, &remainder);
#else
CUTLASS_UNUSED(remainder);
CUTLASS_UNUSED(divisor);
exception();
#endif
return quotient;
}
/// Left-shifts a 128b unsigned integer
CUTLASS_HOST_DEVICE
uint128_t operator<<(int sh) const
{
if (sh == 0) {
return *this;
}
else if (sh >= storage_bits_) {
return uint128_t(0, hilo_.lo << (sh - storage_bits_));
}
else {
return uint128_t(
(hilo_.lo << sh),
(hilo_.hi << sh) | uint64_t(hilo_.lo >> (storage_bits_ - sh))
);
}
}
/// Right-shifts a 128b unsigned integer
CUTLASS_HOST_DEVICE
uint128_t operator>>(int sh) const
{
if (sh == 0) {
return *this;
}
else if (sh >= storage_bits_) {
return uint128_t((hilo_.hi >> (sh - storage_bits_)), 0);
}
else {
return uint128_t(
(hilo_.lo >> sh) | (hilo_.hi << (storage_bits_ - sh)),
(hilo_.hi >> sh)
);
}
}
};
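// Example usage (illustrative sketch, not part of the header's API surface). Note the
// division paths require either the native 128b integer type or MSVC's _udiv128 intrinsic;
// otherwise exception() is invoked.
//
//   cutlass::uint128_t x(~uint64_t(0));   // x = 2^64 - 1
//   x = x + cutlass::uint128_t(1);        // carry propagates: x = 2^64
//   uint64_t rem;
//   uint64_t q = x.divmod(rem, 3);        // q = 6148914691236517205, rem = 1
//   cutlass::uint128_t y = x << 1;        // y = 2^65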
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/uint128.h/0 | {
"file_path": "include/cutlass/uint128.h",
"repo_id": "include",
"token_count": 2983
} | 41 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Common data types and string names/tags for them
"""
import enum
from cutlass_library import (
    ComplexTransform,
    DataType,
    DataTypeSize,
    EpilogueScheduleType,
    KernelScheduleType,
    MathOperation,
    OpcodeClass,
    OperationKind,
    TileSchedulerType
)
# The following block implements enum.auto() for Python 3.5 variants that don't include it such
# as the default 3.5.2 on Ubuntu 16.04.
#
# https://codereview.stackexchange.com/questions/177309/reimplementing-pythons-enum-auto-for-compatibility
try:
from enum import auto as enum_auto
except ImportError:
__cutlass_library_auto_enum = 0
def enum_auto() -> int:
global __cutlass_library_auto_enum
i = __cutlass_library_auto_enum
__cutlass_library_auto_enum += 1
return i
class DataTypeSizeBytes:
"""
Static class to mimic the `DataTypeSize` dictionary, but with checks for whether the
data type key is less than a full byte or a non-integer number of bytes.
"""
@staticmethod
def __class_getitem__(datatype):
"""
        Returns the size of the data type in bytes. Raises an exception if the data type
is either less than a full byte or a non-integer number of bytes in size.
:param datatype: data type to query
:return: number of bytes the data type occupies
:rtype: int
"""
bits = DataTypeSize[datatype]
if bits < 8:
raise Exception(
f"Data type {datatype} is less than one byte in size."
)
elif bits % 8 != 0:
raise Exception(
f"Data type datatype is not an integer number of bytes."
)
return bits // 8
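# Illustrative usage sketch (example values assume the cutlass_library DataType enum):
# whole-byte types resolve to their byte size, while sub-byte types raise, e.g.
#   DataTypeSizeBytes[DataType.f16]  # -> 2
#   DataTypeSizeBytes[DataType.s8]   # -> 1
#   DataTypeSizeBytes[DataType.s4]   # raises: less than one byte in size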
class SchedulerMode(enum.Enum):
Device = enum_auto()
Host = enum_auto()
SchedulerModeTag = {
SchedulerMode.Device: "cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly",
SchedulerMode.Host: "cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute",
}
ShortSchedulerModeNames = {SchedulerMode.Device: "Device", SchedulerMode.Host: "Host"}
class FunctionalOp(enum.Enum):
AtomicAdd = enum_auto()
AtomicMaximum = enum_auto()
Divides = enum_auto()
Maximum = enum_auto()
Minimum = enum_auto()
Minus = enum_auto()
Multiplies = enum_auto()
MultiplyAdd = enum_auto()
Plus = enum_auto()
FunctionalOpTag = {
FunctionalOp.AtomicAdd: "cutlass::atomic_add",
FunctionalOp.AtomicMaximum: "cutlass::atomic_maximum",
FunctionalOp.Divides: "cutlass::divides",
FunctionalOp.Maximum: "cutlass::maximum",
FunctionalOp.Minimum: "cutlass::minimum",
FunctionalOp.Minus: "cutlass::minus",
FunctionalOp.Multiplies: "cutlass::multiplies",
FunctionalOp.MultiplyAdd: "cutlass::multiply_add",
FunctionalOp.Plus: "cutlass::plus",
}
class ActivationOp(enum.Enum):
DGelu = enum_auto()
Gelu = enum_auto()
GeluTaylor = enum_auto()
HardSwish = enum_auto()
Identity = enum_auto()
LeakyReLU = enum_auto()
ReLU = enum_auto()
Sigmoid = enum_auto()
SiLU = enum_auto()
Tanh = enum_auto()
ActivationOpTag = {
ActivationOp.DGelu: "cutlass::epilogue::thread::dGELU",
ActivationOp.Gelu: "cutlass::epilogue::thread::GELU",
ActivationOp.GeluTaylor: "cutlass::epilogue::thread::GELU_taylor",
ActivationOp.HardSwish: "cutlass::epilogue::thread::HardSwish",
ActivationOp.Identity: "cutlass::epilogue::thread::Identity",
ActivationOp.LeakyReLU: "cutlass::epilogue::thread::LeakyReLU",
ActivationOp.ReLU: "cutlass::epilogue::thread::ReLu",
ActivationOp.Sigmoid: "cutlass::epilogue::thread::Sigmoid",
ActivationOp.SiLU: "cutlass::epilogue::thread::SiLu",
ActivationOp.Tanh: "cutlass::epilogue::thread::Tanh",
}
def op_tag(op) -> str:
"""
Dispatches `op` to the appropriate *Tag dictionary depending on whether
`op` is an ActivationOp or FunctionalOp. This is useful for cases in which
either type can be used.
:param op: operation to emit a tag for
:type op: ActivationOp | FunctionalOp
:return: tag corresponding to op
:rtype: str
"""
if isinstance(op, ActivationOp):
return ActivationOpTag[op]
elif isinstance(op, FunctionalOp):
return FunctionalOpTag[op]
else:
raise Exception(f"Unexpected op type {op}. Must be one of ActivationOp or FunctionalOp.")
class FloatRoundStyle(enum.Enum):
ToNearest = enum_auto()
ToNearestSatfinite = enum_auto()
Indeterminate = enum_auto()
TowardZero = enum_auto()
TowardInfinity = enum_auto()
TowardNegInfinity = enum_auto()
HalfUlpTruncDntz = enum_auto()
HalfUlpTruncate = enum_auto()
FloatRoundStyleTag = {
FloatRoundStyle.ToNearest: "cutlass::FloatRoundStyle::round_to_nearest",
FloatRoundStyle.ToNearestSatfinite: "cutlass::FloatRoundStyle::round_to_nearest_satfinite",
FloatRoundStyle.Indeterminate: "cutlass::FloatRoundStyle::round_indeterminate",
FloatRoundStyle.TowardZero: "cutlass::FloatRoundStyle::round_toward_zero",
FloatRoundStyle.TowardInfinity: "cutlass::FloatRoundStyle::round_toward_infinity",
FloatRoundStyle.TowardNegInfinity: "cutlass::FloatRoundStyle::round_toward_neg_infinity",
FloatRoundStyle.HalfUlpTruncDntz: "cutlass::FloatRoundStyle::round_half_ulp_trunc_dntz",
FloatRoundStyle.HalfUlpTruncate: "cutlass::FloatRoundStyle::round_half_ulp_truncate",
}
class MathInstruction:
"""
    Description of the lowest-level matrix-multiply-accumulate operation to be used in a kernel
"""
def __init__(
self,
instruction_shape,
element_a,
element_b,
element_accumulator,
opcode_class=OpcodeClass.Simt,
math_operation=MathOperation.multiply_add,
):
"""
:param instruction_shape: size of the [M, N, K] dimensions of the instruction
:type instruction_shape: list or tuple
:param element_a: data type of operand A
:param element_b: data type of operand B
:param element_accumulator: data type used in accumulation
:param opcode_class: higher-level class of the instruction (e.g., SIMT or Tensor Core)
:type opcode_class: cutlass_library.library.OpcodeClass
:param math_operation: the type of low-level operation to be performed (e.g., multiply accumulate)
:type math_operation: MathOperation
"""
self.instruction_shape = instruction_shape
self.element_a = element_a
self.element_b = element_b
self.element_accumulator = element_accumulator
self.opcode_class = opcode_class
self.math_operation = math_operation
class TileDescription:
"""
Description of a tile of computation to be performed in the kernel, encompassing threadblock, cluster, and warp shapes,
stage count, and math instruction specification
"""
def __init__(
self,
threadblock_shape,
stages,
warp_count,
math_instruction,
cluster_shape=[1, 1, 1],
kernel_schedule: KernelScheduleType = None,
epilogue_schedule: EpilogueScheduleType = None,
tile_scheduler: TileSchedulerType = None
):
"""
        :param threadblock_shape: shape of a threadblock tile
:type threadblock_shape: list or tuple
        :param stages: number of pipeline stages in the operation. For SM90 kernels, this can be set to `None` and the maximum
number of stages that can be supported for an operation on a given architecture will be computed at a later time
:type stages: int or None
:param warp_count: number of warps in each [M, N, K] dimension of a threadblock tile
:type warp_count: list, tuple, or None
:param math_instruction: specification of the instruction type and shape to be performed and the types of its operands
:type math_instruction: MathInstruction
:param cluster_shape: number of threadblocks in the [X, Y, Z] dimensions of a threadblock cluster
:param kernel_schedule: type of kernel schedule to use (only available for SM90+)
:type kernel_schedule: cutlass_library.KernelScheduleType
:param epilogue_schedule: type of epilogue schedule to use (only available for SM90+)
:type epilogue_schedule: cutlass_library.EpilogueScheduleType
:param tile_scheduler: type of tile scheduler to use (only available for SM90+)
:type tile_scheduler: cutlass_library.TileSchedulerType
"""
        if ((kernel_schedule is None and epilogue_schedule is not None) or
            (kernel_schedule is not None and epilogue_schedule is None)):
            raise Exception("Kernel and epilogue schedules must either both be specified or both be None (Auto).")
self.threadblock_shape = threadblock_shape
self.cluster_shape = cluster_shape
self.kernel_schedule = kernel_schedule
self.epilogue_schedule = epilogue_schedule
self.tile_scheduler = tile_scheduler
self.stages = stages
self.math_instruction = math_instruction
self.instruction_shape = math_instruction.instruction_shape
# Number of warps along x, y, z directions
self.warp_count = warp_count
def clone_and_update(self, td: dict):
attrs = {
"cluster_shape": None,
"threadblock_shape": None,
"warp_count": None,
"stages": None,
"instruction_shape": None,
"kernel_schedule": None,
"epilogue_schedule": None,
"tile_scheduler": None
}
for key in attrs.keys():
if key in td.keys():
attrs[key] = td[key]
else:
attrs[key] = getattr(self, key)
attrs["math_instruction"] = MathInstruction(
attrs["instruction_shape"],
self.math_instruction.element_a,
self.math_instruction.element_b,
self.math_instruction.element_accumulator,
self.math_instruction.opcode_class,
self.math_instruction.math_operation
)
# Remove the instruction shape
del attrs["instruction_shape"]
return TileDescription(**attrs)
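    # Illustrative usage sketch: derive a new description that differs only in stage count,
    # e.g. `td_4stage = td.clone_and_update({"stages": 4})`; fields not named in the dict
    # are copied from `self`.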
@property
def num_threads(self):
"""
Returns the number of threads in the threadblock
:return: number of threads in the threadblock
:rtype: int or None (if warp count is None)
"""
if self.warp_count is not None:
threads = 32
for cnt in self.warp_count:
threads *= cnt
return threads
return None
def procedural_name(self):
"""
Returns a name identifying the tile description
:return: name identifying the tile description
        :rtype: str
"""
emit_stages = 0 if self.stages is None else self.stages
name = "%dx%dx%d_%dx%d_%dx%d" % (
self.cluster_shape[0],
self.cluster_shape[1],
self.cluster_shape[2],
self.threadblock_shape[0],
self.threadblock_shape[1],
self.threadblock_shape[2],
emit_stages
)
return name
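    # Illustrative example: cluster_shape [1, 1, 1], threadblock_shape [128, 128, 64], and
    # stages 3 yield the name "1x1x1_128x128_64x3".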
def procedural_name_2x(self):
"""
Returns a name identifying the tile description
:return: name identifying the tile description
        :rtype: str
"""
return "%dx%d_%dx%d" % (self.threadblock_shape[0], self.threadblock_shape[1], self.threadblock_shape[2], self.stages)
def __str__(self):
"""
        Returns a string containing each of the tile description's values
:return: contents of tile description
:rtype: str
"""
if self.kernel_schedule is not None:
kschedule = self.kernel_schedule
else:
kschedule = KernelScheduleType.ScheduleAuto
if self.epilogue_schedule is not None:
eschedule = self.epilogue_schedule
else:
eschedule = EpilogueScheduleType.ScheduleAuto
if self.tile_scheduler is not None:
tschedule = self.tile_scheduler.name
else:
tschedule = "None"
return f"""
{{
ClusterShape: {self.cluster_shape}
ThreadblockShape: {self.threadblock_shape}
WarpCount: {self.warp_count}
Stages: {self.stages if self.stages is not None else 'Auto'}
InstructionShape: {self.math_instruction.instruction_shape}
Kernel schedule: {kschedule.name}
  Epilogue schedule: {eschedule.name}
TileScheduler: {tschedule}
}}"""
class TensorDescription:
def __init__(self, element, layout, alignment=1, complex_transform=ComplexTransform.none):
self.element = element
self.layout = layout
if element != DataType.void:
self.alignment = min(128 // DataTypeSize[self.element], alignment)
else:
self.alignment = alignment
self.complex_transform = complex_transform
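# Illustrative example for the TensorDescription alignment clamp above (names assumed from
# cutlass_library): an f16 operand requested with alignment 16 is clamped to
# 128 // DataTypeSize[DataType.f16] = 8 elements, i.e.
#   TensorDescription(DataType.f16, LayoutType.RowMajor, 16).alignment  # -> 8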
def CalculateSmemUsagePerStage(operation):
"""
Returns the amount of shared memory in bytes consumed in a single stage of a kernel.
    :param operation: operation for which the per-stage shared memory usage should be computed. Any
        value set via the `operation.tile_description.stages` parameter is ignored in the
        present calculation
    :type operation: cutlass.backend.Operation
:return: number of bytes of shared memory consumed by a single stage
:rtype: int
"""
m, n, k = operation.tile_description.threadblock_shape
if operation.operation_kind == OperationKind.Gemm:
stage_barrier_bytes = 32
return (
(DataTypeSize[operation.A.element] * m * k // 8)
+ (DataTypeSize[operation.B.element] * k * n // 8)
+ stage_barrier_bytes
)
else:
raise Exception("Unsupported operation kind {}.".format(operation.operation_kind))
def CalculateSmemUsage(operation):
"""
Returns the amount of shared memory in bytes consumed by a kernel.
    :param operation: operation for which the shared memory usage should be computed
    :type operation: cutlass.backend.Operation
    :return: number of bytes of shared memory consumed by the kernel across all stages
    :rtype: int
"""
return operation.tile_description.stages * CalculateSmemUsagePerStage(operation)
class ApiVersion(enum.Enum):
"""
Differentiate between CUTLASS 2.x and 3.x API versions
"""
v2x = enum_auto()
v3x = enum_auto()
def api_version(arch, opclass, dtype):
"""
Returns whether the architecture, opcode class, and datatype in question require using CUTLASS 2.x
or 3.x for code emission.
:param arch: compute capability of device on which to run
:type arch: int
:param opclass: class of the operation being performed
:type opclass: cutlass_library.OpcodeClass
:param dtype: data type to be used in operation (assumes that ElementA and ElementB are the same)
:type dtype: cutlass_library.DataType
:return: API version to be used in code emission
:rtype: ApiVersion
"""
if (arch >= 90 and
opclass == OpcodeClass.TensorOp and
(dtype != DataType.f64)):
return ApiVersion.v3x
else:
return ApiVersion.v2x
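# Illustrative examples of the dispatch rule above:
#   api_version(90, OpcodeClass.TensorOp, DataType.f16)  # -> ApiVersion.v3x
#   api_version(90, OpcodeClass.TensorOp, DataType.f64)  # -> ApiVersion.v2x
#   api_version(80, OpcodeClass.TensorOp, DataType.f16)  # -> ApiVersion.v2x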
class EmissionType(enum.Enum):
"""
Tags for whether to emit a kernel- or device-level operation
"""
Kernel = enum_auto()
Device = enum_auto()
| python/cutlass/backend/library.py/0 | {
"file_path": "python/cutlass/backend/library.py",
"repo_id": "python",
"token_count": 6615
} | 42 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Ease-of-use interface for constructing, compiling, and running GEMMs.
The ``Gemm`` interface is meant to allow one to easily instantiate, compile, and run
GEMM operations in CUTLASS via Python, without specifying many configuration parameters.
Under the hood, the interface will select sensible default parameters for the many template
parameters for CUTLASS GEMMs.
Note: optimal performance is not to be expected from this interface. To achieve optimal
performance, one should specify and tune each configuration parameter.
The simplest example of using this interface is the following:
.. highlight:: python
.. code-block:: python
# A, B, C, and D are torch/numpy/cupy tensor objects
plan = cutlass.op.Gemm(A, B, C, D)
plan.run()
One can also use the interface by specifying data types of operands at construction
and using different tensor objects with these data types at runtime:
.. highlight:: python
.. code-block:: python
# The following is shorthand for:
# cutlass.op.Gemm(element_A=torch.float32, element_B=torch.float32,
# element_C=torch.float32, element_D=torch.float32,
# element_accumulator=torch.float32,
# layout=cutlass.LayoutType.RowMajor)
plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
A0 = torch.rand((128, 256), device='cuda')
B0 = torch.rand((256, 64), device='cuda')
C0 = torch.zeros((128, 64), device='cuda')
    D0 = torch.zeros((128, 64), device='cuda')
    plan.run(A0, B0, C0, D0)
    A1 = torch.rand((32, 128), device='cuda')
    B1 = torch.rand((128, 256), device='cuda')
    C1 = torch.zeros((32, 256), device='cuda')
    D1 = torch.zeros((32, 256), device='cuda')
    plan.run(A1, B1, C1, D1)
The interface additionally enables one to decouple the compilation of the underlying CUTLASS
kernel from its execution:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
plan.compile()
# Do other work...
plan.run(A0, B0, C0, D0)
# Do other work...
plan.run(A1, B1, C1, D1)
Elementwise activation functions are easily fused to the GEMM via the interface:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
plan.activation = cutlass.epilogue.relu
Operations can also be run asynchronously:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
args = plan.run()
# Do other work...
args.sync()
"""
from math import prod
from cuda import cuda
from cutlass_library import (
DataType,
DataTypeSize,
GemmUniversalMode,
)
import cutlass
from cutlass import epilogue, swizzle
from cutlass.backend import compiler
from cutlass.backend.evt import EpilogueFunctorVisitor
from cutlass.backend.gemm_operation import GemmArguments, GemmOperationUniversal
from cutlass.backend.library import TensorDescription, TileDescription
from cutlass.op.op import OperationBase
from cutlass.shape import GemmCoord
from cutlass.utils import check, datatypes
class Gemm(OperationBase):
"""
Constructs a ``Gemm`` object.
The data types and layouts of operands A, B, and C, along with the data type of output D
and that used for accumulation, are bound to the ``Gemm`` object throughout its lifetime --
these are not to be changed after a ``Gemm`` has been constructed.
The constructor has optional parameters for flexibly setting these parameters. The following
constructors are equivalent:
.. highlight:: python
.. code-block:: python
# Use F32 for A, B, C, D, and accumulation. All operands are row major.
# Use the generic ``element`` and ``layout`` parameters to concisely set all data types and layouts
# for operands to the same values.
Gemm(element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor)
# Explicitly specify the data types to use for A, B, C, and D. Use the generic ``layout``.
Gemm(element_A=cutlass.DataType.f32, element_B=cutlass.DataType.f32, element_C=cutlass.DataType.f32,
element_D=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor)
# Set the data types and elements from existing tensors. Note that one can use different tensors when
# executing GEMM via the ``run()`` method than passed in here (though those passed in to ``run()`` must
# have the same data type and layout as those passed in here).
# A, B, C, and D are row-major torch.Tensor objects of type torch.float32
Gemm(A=A, B=B, C=C, D=D)
# Use the generic ``element`` and explicitly specify the layouts to use for A, B, and C (layout of D is
        # the same as that for C, at present)
Gemm(element=cutlass.DataType.f32, layout_A=cutlass.LayoutType.RowMajor,
layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.RowMajor)
# Explicitly specify the data type and layout for only some of A, B, C, and D. Unspecified data types
# and layouts will inherit those passed in via the generic ``element`` and ``layout``
Gemm(element_A=cutlass.DataType.f32, layout_B=cutlass.LayoutType.RowMajor,
element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor)
The order of precedence for the setting of the data type and layout for a given operand/output is as follows:
1) If the tensor type is specified (e.g., ``A``), use the data type and layout inferred from this tensor
2) Otherwise, if the data type/layout (e.g., ``element_A``, ``layout_A``) is specified, use those
3) Otherwise, use the generic values (e.g., ``element``, ``layout``)
:param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90
:type cc: int
:param kernel_cc: compute capability of kernels to generate. For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80
:type kernel_cc: int
:param A: tensor representing data type and layout of operand A
:param B: tensor representing data type and layout of operand B
:param C: tensor representing data type and layout of operand C
:param D: tensor representing data type and layout of operand D
    :param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param element_accumulator: data type to be used in accumulation of the product of operands A and B
:type element_accumulator: cutlass.DataType
:param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type
:type element: cutlass.DataType
:param layout: generic layout type to be used for operands A, B, C, and D
:type layout: cutlass.LayoutType
:param element_A: data type to be used for operand A
:type element_A: cutlass.DataType
:param element_B: data type to be used for operand B
:type element_B: cutlass.DataType
:param element_C: data type to be used for operand C
:type element_C: cutlass.DataType
:param element_D: data type to be used for operand D
:type element_D: cutlass.DataType
:param layout_A: layout of operand A
:type layout_A: cutlass.LayoutType
:param layout_B: layout of operand B
:type layout_B: cutlass.LayoutType
:param layout_C: layout of operand C
:type layout_C: cutlass.LayoutType
:param layout_D: layout of operand D
:type layout_D: cutlass.LayoutType
"""
def __init__(
self, A=None, B=None, C=None, D=None,
alpha=1.0, beta=0.0, element_accumulator=None,
element=None, layout=None,
element_A=None, element_B=None, element_C=None, element_D=None,
layout_A=None, layout_B=None, layout_C=None,
cc: int = None, kernel_cc: int = None
):
super().__init__(cc=cc, kernel_cc=kernel_cc)
self.name = "gemm"
self.compiled = False
elements = []
layouts = []
# Check that at least one of the following is set for each tensor (illustrated assuming tensor A):
# ``A``, ``element_A``, ``element`` and ``A``, ``layout_A``, ``layout``
for elt, lay, tens, name in zip([element_A, element_B, element_C, element_D],
[layout_A, layout_B, layout_C, layout_C],
[A, B, C, D],
["A", "B", "C", "D"]):
if elt is not None and tens is not None:
raise Exception(f'Must not specify both element_{name} and tensor {name}')
if lay is not None and tens is not None:
raise Exception(f'Must not specify both layout_{name} and tensor {name}')
if elt is None and tens is None and element is None:
raise Exception(f'Must specify one of element_{name}, tensor {name}, or generic element.')
if lay is None and tens is None and layout is None:
raise Exception(f'Must specify one of layout_{name}, tensor {name}, or generic layout.')
elt_to_set = None
lay_to_set = None
if tens is not None:
elt_to_set, lay_to_set = datatypes.get_datatype_and_layout(tens)
else:
elt_to_set = elt if elt is not None else element
lay_to_set = lay if lay is not None else layout
elements.append(datatypes.library_type(elt_to_set))
layouts.append(lay_to_set)
self._element_a, self._element_b, self._element_c, self._element_d = elements
self._layout_a, self._layout_b, self._layout_c, self._layout_d = layouts
if element_accumulator is None:
self._element_accumulator = self._element_c
else:
self._element_accumulator = datatypes.library_type(element_accumulator)
self.A = A
self.B = B
self.C = C
self.D = D
self.alpha = alpha
self.beta = beta
self.epilogue_functor = None
self.op_class = None
self._tile_description = None
self._reset_operations()
self._swizzling_functor = cutlass.swizzle.IdentitySwizzle1
def _reset_operations(self, reset_epilogue: bool = True):
# Set the default op class
datatype_comb = (self._element_a, self._element_b, self._element_accumulator)
layout_comb = (self._layout_a, self._layout_b)
self.possible_op_classes = self.options.supporting_opclasses(
self._element_a, self._element_b, self._element_accumulator,
self._layout_a, self._layout_b, self._math_operation)
if cutlass.OpcodeClass.TensorOp in self.possible_op_classes:
self.opclass = cutlass.OpcodeClass.TensorOp
elif cutlass.OpcodeClass.Simt in self.possible_op_classes:
self.opclass = cutlass.OpcodeClass.Simt
else:
if self._math_operation is not None:
math_op_str = f' and math operation {self._math_operation}'
else:
math_op_str = ''
raise Exception(f'No kernel configuration found for supported data type and layout '
f'combination {datatype_comb}x{layout_comb}{math_op_str}')
if reset_epilogue:
self._reset_epilogue_functor_activation(cutlass.epilogue.identity)
@property
def swizzling_functor(self):
"""
Returns the type of the swizzling functor currently being used by the GEMM
        :return: swizzling functor type
"""
return self._swizzling_functor
@swizzling_functor.setter
def swizzling_functor(self, swizzling_functor):
"""
Sets the swizzling functor to the type specified by `swizzling_functor`
"""
if swizzling_functor == cutlass.swizzle.ThreadblockSwizzleStreamK:
if self.op_class == cutlass.OpcodeClass.Simt:
raise Exception('ThreadblockSwizzleStreamK is currently only supported with opcode class TensorOp')
if self.current_cc == 90:
raise Exception('ThreadblockSwizzleStreamK is currently unsupported on SM90')
self._swizzling_functor = swizzling_functor
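    # Illustrative use of the setter above (a sketch; it assumes a TensorOp kernel on a
    # pre-SM90 target, since stream-K swizzling is rejected for SIMT or SM90 above):
    #
    #   plan = cutlass.op.Gemm(element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
    #   plan.swizzling_functor = cutlass.swizzle.ThreadblockSwizzleStreamK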
#
# Tile description Related
#
@property
def tile_description(self) -> TileDescription:
"""
Returns the tile description
"""
return self._tile_description
@tile_description.setter
def tile_description(
self, td=None):
"""
Set the tile description
:param td: tile description
:type td: cutlass.backend.TileDescription, or a dict with keys
{
"threadblock_shape": [int, int, int],
"warp_count": [int, int, int],
"stages": int,
"instruction_shape": [int, int, int] (optional),
"cluster_shape": [int, int, int] (optional)
}
"""
if td is None:
return
if isinstance(td, dict):
if self._tile_description is None:
op = self.possible_operations.default_operation(self._math_operation)
self._tile_description = datatypes.td_from_profiler_op(op)
td = self._tile_description.clone_and_update(td)
valid, msg = self._valid_tile_description(td)
if valid:
self._tile_description = td
else:
raise Exception(msg)
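    # Illustrative use of the setter above with the dict form (the shapes and stage count
    # are assumptions chosen for the sketch, not tuned values):
    #
    #   plan.tile_description = {
    #       "threadblock_shape": [128, 128, 32],
    #       "warp_count": [2, 2, 1],
    #       "stages": 3,
    #   }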
def _valid_tile_description(self, td: TileDescription) -> tuple:
"""
Checks whether the provided tile description is valid for the given compute capability. At present,
this checks the following:
- Does the tile description use a number of stages supported by the compute capability in question?
- Does the tile size requested fit within shared memory?
- Are cluster dimensions outside the valid range requested for a given architecture (e.g.,
more non-unit cluster dimensions for pre-SM90 architectures)?
- Is the kernel schedule being used supported on the architecture in question?
:param td: tile description to validate
:type td: cutlass.backend.TileDescription
:return: tuple in which the first element is a bool indicating that the tile description is valid
and the second element is a string providing an optional error message.
:rtype: tuple
"""
valid, msg = check.valid_stage_count(self.cc, self.current_cc, td, self._element_c, self._element_d)
if not valid:
return (valid, msg)
valid, msg = check.valid_cluster_shape(self.current_cc, td.cluster_shape)
if not valid:
return (valid, msg)
valid, msg = check.valid_schedule(self.current_cc, td.kernel_schedule, td.epilogue_schedule, td.tile_scheduler)
return valid, msg
def tile_descriptions(self) -> list:
"""
Returns a list of valid tile descriptions for the operations
:returns: list of valid tile descriptions for the operations
:rtype: list
"""
tds = [datatypes.td_from_profiler_op(op) for op in self.possible_operations.all_operations]
if self._math_operation is not None:
tds = [td for td in tds if td.math_instruction.math_operation == self._math_operation]
return tds
def construct(
self, tile_description: TileDescription = None,
alignment_A: int = None, alignment_B: int = None, alignment_C: int = None) -> GemmOperationUniversal:
"""
Constructs a ``cutlass.backend.GemmUniversalOperation`` based on the input parameters and current
kernel specification of the ``Gemm`` object.
:param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:return: operation that was constructed
:rtype: cutlass.backend.GemmOperationUniversal
"""
alignment_pref_A = min(128 // DataTypeSize[self._element_a], max(self.possible_operations.alignments("A")))
alignment_pref_B = min(128 // DataTypeSize[self._element_b], max(self.possible_operations.alignments("B")))
alignment_A = check.alignment_or_default(alignment_A, alignment_pref_A)
alignment_B = check.alignment_or_default(alignment_B, alignment_pref_B)
tensor_A = TensorDescription(self._element_a, self._layout_a, alignment_A)
tensor_B = TensorDescription(self._element_b, self._layout_b, alignment_B)
if alignment_C is None:
alignment_C = max(self.possible_operations.alignments("C"))
if self._element_c != DataType.void:
alignment_C = min(128 // DataTypeSize[self._element_c], alignment_C)
if tile_description is None:
if self._tile_description is None:
op = self.possible_operations.operations(alignment_A, alignment_B, alignment_C, self._math_operation)[0]
tile_description = datatypes.td_from_profiler_op(op)
# The selected op may have lower alignment than that determined above, so we must
# reset alignment here.
alignment_C = op.C.alignment
else:
tile_description = self._tile_description
else:
valid, err_str = self._valid_tile_description(tile_description)
if not valid:
raise Exception(f"Invalid tile description. {err_str}")
self._tile_description = tile_description
tensor_C = TensorDescription(self._element_c, self._layout_c, alignment_C)
self.epilogue_functor = self._reset_epilogue_functor_alignment(alignment_C, self.epilogue_functor)
operation = GemmOperationUniversal(
arch=self.current_cc,
tile_description=tile_description,
A=tensor_A, B=tensor_B, C=tensor_C,
epilogue_functor=self.epilogue_functor,
swizzling_functor=self._swizzling_functor,
)
return operation
def compile(self, tile_description: TileDescription = None,
alignment_A: int = None, alignment_B: int = None, alignment_C: int = None,
print_module: bool = False) -> cutlass.backend.GemmOperationUniversal:
"""
Emits and compiles the kernel currently specified. If ``tile_description`` and any
of the ``alignment`` parameters are set, the kernel will be chosen using this
tile description and alignments. Otherwise, a default tile description and alignment
will be used.
:param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:param print_module: whether to print the emitted C++ code
:type print_module: bool
:return: operation that was compiled
:rtype: cutlass.backend.GemmOperationUniversal
"""
self.operation = self.construct(tile_description, alignment_A, alignment_B, alignment_C)
if print_module:
print(self.operation.rt_module.emit())
compiler.add_module([self.operation,])
return self.operation
def _verify_rank(self, tensor):
"""
Verifies that ``tensor`` has rank greater than 1
:param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type tensor: numpy/cupy/torch array/tensor object
"""
if len(tensor.shape) < 2:
raise Exception(f"Tensors must be of rank greater than 1. Received tensor of shape: {tensor.shape}")
def _get_batch_count(self, A, B, C, D) -> int:
"""
Returns the batch count specified by the tensors A, B, C, and D and verifies that these
tensors match in batch size. Presence of a batch dimension is detected by one of the
tensors being rank 3. If a batch dimension is present, it must be present in one of
operands A, B, or C (but need not be in all), and must be present in D.
:param A: tensor A
:type A: numpy/cupy/torch array/tensor object
:param B: tensor B
:type B: numpy/cupy/torch array/tensor object
:param C: tensor C
:type C: numpy/cupy/torch array/tensor object
:param D: tensor D
:type D: numpy/cupy/torch array/tensor object
        :return: batch count shared by the batched operands
        :rtype: int
"""
A_batch = prod(A.shape[:-2]) if len(A.shape) > 2 else 1
B_batch = prod(B.shape[:-2]) if len(B.shape) > 2 else 1
if 1 not in [A_batch, B_batch]:
if A_batch != B_batch:
raise Exception(f"Get invalid batch counts: A={A_batch}, B={B_batch}")
return max(A_batch, B_batch)
def _get_batch_stride(self, tensor) -> int:
"""
Returns the batch stride of ``tensor``. If ``tensor`` is only rank-2, batch stride is 0.
:param tensor: tensor object to process
:type tensor: numpy/cupy/torch array/tensor object
:return: stride between each matrix in the batch
:rtype: int
"""
if tensor is not None and len(tensor.shape) > 2:
return tensor.shape[-2] * tensor.shape[-1]
else:
return 0
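    # For example, a rank-3 operand of shape (4, 128, 64) yields a batch stride of
    # 128 * 64 = 8192 elements from the method above, while a rank-2 operand yields 0.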
def _get_problem_args(self, A, B, C, D) -> tuple:
"""
Returns the problem size and GEMM universal mode to use for the
given operands.
:param A: tensor A
:type A: numpy/cupy/torch array/tensor object
:param B: tensor B
:type B: numpy/cupy/torch array/tensor object
:param C: tensor C
:type C: numpy/cupy/torch array/tensor object
:param D: tensor D
:type D: numpy/cupy/torch array/tensor object
:return: tuple containing the problem size (cutlass.shape.GemmCoord), the GEMM mode (cutlass.GemmUniversalMode), and the batch count (int)
:rtype: tuple
"""
M, K = A.shape[-2:]
N = B.shape[-1]
mode = GemmUniversalMode.Gemm
batch_count = self._get_batch_count(A, B, C, D)
returned_batch_count = batch_count
# If we are running a batched GEMM in which there is a nonzero batch stride
# only for A, then we can fold the batched dimension of A into the M dimension
# (i.e., (b, m, k) x (k, n) -> (m*b, k) x (k, n)). This works only if both A
# and C are row major. A similar operation can be performed if only B has a nonzero
# batch dimension
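        # For example (an illustrative case, not drawn from elsewhere in this file):
        # a row-major A of shape (4, 128, 64), a non-batched B of shape (64, 256), and a
        # row-major C/D of shape (4, 128, 256) fold into a single (512, 64) x (64, 256)
        # GEMM with a returned batch count of 1; when the layout conditions below do not
        # hold, the mode instead falls back to GemmUniversalMode.Batched.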
if batch_count > 1:
A_row = self._layout_a == cutlass.LayoutType.RowMajor
B_row = self._layout_b == cutlass.LayoutType.RowMajor
C_row = self._layout_c == cutlass.LayoutType.RowMajor
# Consider a Tensor to be batched if its rank is > 2 and
# the product of the modes beyond rank 2 equals our pre-determined batch size.
batched = lambda x : x is None or (len(x.shape) > 2 and prod(x.shape[:-2]) == batch_count)
if batched(A) and not batched(B) and (C is None or batched(C)) and A_row and C_row:
M *= batch_count
returned_batch_count = 1
elif not batched(A) and batched(B) and (C is None or batched(C)) and not B_row and not C_row:
N *= batch_count
returned_batch_count = 1
else:
mode = GemmUniversalMode.Batched
return GemmCoord(M, N, K), mode, returned_batch_count
def _verify_type_and_layout(self, tensor, ref_type, ref_layout, name):
"""
Verifies that ``tensor`` has data type ``ref_type`` and layout ``ref_layout``. An exception
is raised if it does not.
:param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type tensor: numpy/cupy/torch array/tensor object
:param ref_dtype: data type for the tensor that this object was initialized to
:param ref_layout: layout for the tensor that this object was initialized to
:param name: identifier of the tensor to verify. Used in raising exceptions
:type name: str
"""
dtype, layout = datatypes.get_datatype_and_layout(tensor)
if dtype != ref_type or layout != ref_layout:
try:
# Attempt to transpose the tensor to fit the desired layout
tensor = tensor.transpose(-1, -2)
except:
raise Exception(f'Tensor {name} with type and layout ({dtype}, {layout}) '
f'does not match the expected type and '
f'layout of ({ref_type}, {ref_layout}) and transpose failed.')
def run(self, A=None, B=None, C=None, D=None,
alpha=None, beta=None, sync: bool = True, print_module: bool = False, visitor_args: dict = None,
stream: cuda.CUstream = cuda.CUstream(0)) -> GemmArguments:
"""
Runs the kernel currently specified. If it has not already been, the kernel is emitted and
compiled. Tensors holding operands and outputs of the kernel are sourced either from the
``A``, ``B``, ``C``, ``D``, ``alpha``, and ``beta``
parameters provided in this call, or from those
passed in on the construction of this object -- one of the two must be specified.
By default, this call returns only once the kernel has completed. To launch the kernel
and immediately return, set ``sync=False``. In this case, it is the responsibility of the
        caller to synchronize the results of the kernel before attempting to access outputs
by calling ``sync()`` on the arguments returned from this call.
:param A: tensor representing data type and layout of operand A
:param B: tensor representing data type and layout of operand B
:param C: tensor representing data type and layout of operand C
:param D: tensor representing data type and layout of operand D
        :param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param sync: whether the call should wait for the kernel to complete before returning
:type sync: bool
:param print_module: whether to print the emitted C++ code
:type print_module: bool
:param stream: cuda stream, defaults to cuda.cuda.CUstream(0)
:type stream: :class:`cuda.cuda.CUstream`
:return: arguments passed in to the kernel
:rtype: cutlass.backend.GemmArguments
"""
super().run_setup()
A = self._verify_tensor(A, self.A, self._element_a, self._layout_a, "A")
B = self._verify_tensor(B, self.B, self._element_b, self._layout_b, "B")
C = self._verify_tensor(C, self.C, self._element_c, self._layout_c, "C")
D = self._verify_tensor(D, self.D, self._element_d, self._layout_d, "D")
alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha")
beta = self._verify_scalar(beta, self.beta, self._element_c, "beta")
is_void_c = self._element_c == DataType.void
self._verify_rank(A)
self._verify_rank(B)
if not is_void_c:
self._verify_rank(C)
self._verify_rank(D)
alignment_a = self.possible_operations.find_alignment(A.shape, self._layout_a, operand="A")
alignment_b = self.possible_operations.find_alignment(B.shape, self._layout_b, operand="B")
# Set C alignment based on D.shape so as to correctly get an alignment with void-C
# kernels, for which `C` is None.
alignment_c = self.possible_operations.find_alignment(D.shape, self._layout_c, operand="C")
self.compile(self._tile_description, alignment_A=alignment_a, alignment_B=alignment_b,
alignment_C=alignment_c, print_module=print_module)
problem_size, mode, batch_count = self._get_problem_args(A, B, C, D)
if mode == GemmUniversalMode.Gemm or batch_count == 1:
kwargs = {'split_k_slices': 1}
else:
kwargs = {
'batch': batch_count,
'batch_strides': {
'A': self._get_batch_stride(A),
'B': self._get_batch_stride(B),
'C': self._get_batch_stride(C),
'D': self._get_batch_stride(D)
}
}
kwargs['stream'] = stream
if isinstance(self.epilogue_functor, EpilogueFunctorVisitor):
output_op = self.operation.epilogue_type(visitor_args)
else:
output_op = self.operation.epilogue_type(alpha, beta)
arguments = GemmArguments(
operation=self.operation, problem_size=problem_size,
A=A, B=B, C=C, D=D,
output_op=output_op,
gemm_mode=mode,
**kwargs
)
self.operation.run(arguments)
if sync:
arguments.sync()
return arguments
| python/cutlass/op/gemm.py/0 | {
"file_path": "python/cutlass/op/gemm.py",
"repo_id": "python",
"token_count": 12897
} | 43 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for filtering CUTLASS library kernels and emitting library initialization
and building code
"""
import enum
import logging
import os.path
import shutil
try:
import builtins
if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True:
raise ImportError("Disabling attempt to import cutlass_library")
from cutlass_library.library import *
from cutlass_library.gemm_operation import *
from cutlass_library.rank_k_operation import *
from cutlass_library.rank_2k_operation import *
from cutlass_library.trmm_operation import *
from cutlass_library.symm_operation import *
from cutlass_library.conv2d_operation import *
from cutlass_library.conv3d_operation import *
except ImportError:
from library import *
from gemm_operation import *
from rank_k_operation import *
from rank_2k_operation import *
from trmm_operation import *
from symm_operation import *
from conv2d_operation import *
from conv3d_operation import *
###################################################################################################
_LOGGER = logging.getLogger(__name__)
class EmitOperationKindAll:
"""
Emit the OperationKind-level CUTLASS library initialization code.
The code is generated in the {generated_path}/{operation_kind} directory
(e.g., tools/library/generated/gemm in the build directory,
for OperationKind=Gemm), in the all_{operation_kind}_operations.cu file
(e.g., all_gemm_operations.cu for OperationKind=Gemm).
That file declares several functions in namespace cutlass::library.
The functions all have this form,
void initialize_{configuration_name}(Manifest& manifest);
The file also _defines_ the following function in that namespace.
void initialize_all_{operation_kind}_operations(Manifest& manifest);
That function calls all of the functions declared in this file.
Those functions are defined in subdirectories
(which this class does not create).
"""
def __init__(self, generated_path, kind, args):
self.generated_path = generated_path
self.kind = kind
self.args = args
self.header_template ="""
/*
Generated by manifest.py - Do not edit.
*/
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.entry_template = """
//
// Entry point to construct operations
//
void initialize_all_${operation_name}_operations(Manifest &manifest) {
"""
self.configuration_prototype_template = "void initialize_${configuration_name}(Manifest &manifest);\n"
self.configuration_template =" initialize_${configuration_name}(manifest);\n"
self.epilogue_template ="""}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
"""
#
def __enter__(self):
_LOGGER.debug("*** EmitOperationKindAll::__enter__")
self.operation_path = os.path.join(self.generated_path, OperationKindNames[self.kind])
_LOGGER.debug('*** operation_path (directory to create): ' +
str(self.operation_path));
os.makedirs(self.operation_path, exist_ok=True)
self.top_level_path = os.path.join(self.operation_path, f"all_{OperationKindNames[self.kind]}_operations.cu")
_LOGGER.debug(f"*** top_level_path (file to write): {str(self.top_level_path)}")
self.top_level_file = open(self.top_level_path, "w")
self.top_level_file.write(self.header_template)
self.source_files = [self.top_level_path,]
self.configurations = []
return self
#
def emit(self, operations):
_LOGGER.debug('*** EmitOperationKindAll::emit')
_LOGGER.debug(f"*** len(operations): {len(operations)}")
_LOGGER.debug(f"*** min_cc list: {sorted(min_cc for min_cc, _ in operations.items())}")
for min_cc, configurations in sorted(operations.items()):
_LOGGER.debug(f"*** min_cc={min_cc}")
for configuration_name, _ in configurations.items():
_LOGGER.debug(f"*** configuration_name={configuration_name}")
self.configurations.append(configuration_name)
self.top_level_file.write(SubstituteTemplate(self.configuration_prototype_template, {'configuration_name': configuration_name} ))
#
def __exit__(self, exception_type, exception_value, traceback):
_LOGGER.debug("*** EmitOperationKindAll::__exit__")
self.top_level_file.write(SubstituteTemplate(self.entry_template, {'operation_name': OperationKindNames[self.kind]}))
for configuration_name in self.configurations:
self.top_level_file.write(SubstituteTemplate(self.configuration_template, {'configuration_name': configuration_name}))
self.top_level_file.write(self.epilogue_template)
self.top_level_file.close()
class EmitOperationKindLibrary:
"""
Emit the CUTLASS library initialization code for each OperationKind.
The code is generated in the directory
{generated_path}/{operation_kind}/{min_cc}
(e.g., tools/library/generated/gemm/90 in the build directory,
for min_cc=90 and OperationKind=Gemm), in the file
all_sm{min_cc}_{operation_kind}_operations.cu
(e.g., all_sm90_gemm_operations.cu for min_cc=90 and OperationKind=Gemm).
The min_cc variable here indicates the minimum GPU architecture version
that the things to be initialized require.
For example, min_cc=90 indicates sm90.
That file declares several functions in namespace cutlass::library.
The functions all have this form,
  void initialize_all_sm{min_cc}_{subclass_name}_{operation_kind}_operations(Manifest& manifest);
  where subclass_name is operation.extended_name() for all the operations
  given to the emit method (see below). (All operations for a given
  configuration_name are guaranteed to have the same extended_name().)
The file also _defines_ the following function in that namespace.
void initialize_all_sm{min_cc}__{operation_kind}_operations(Manifest& manifest);
That function calls all of the functions declared in this file.
Those functions are defined in subdirectories.
The mapping from OperationKind to emitter handles the details
of what happens in each of those subdirectories.
"""
def __init__(self, generated_path, min_cc, kind, args):
self.generated_path = generated_path
self.min_cc = min_cc
self.kind = kind
self.args = args
self.emitters = {
OperationKind.Gemm: EmitGemmConfigurationLibrary,
OperationKind.Conv2d: EmitConv2dConfigurationLibrary,
OperationKind.Conv3d: EmitConv3dConfigurationLibrary,
OperationKind.RankK: EmitRankKConfigurationLibrary,
OperationKind.Rank2K: EmitRank2KConfigurationLibrary,
OperationKind.Trmm: EmitTrmmConfigurationLibrary,
OperationKind.Symm: EmitSymmConfigurationLibrary
}
self.header_template ="""
/*
Generated by manifest.py - Do not edit.
*/
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.entry_template = """
//
// Entry point to construct operations
//
void initialize_all_sm${min_cc}_${subclass_name}_${operation_name}_operations(Manifest &manifest) {
"""
self.configuration_prototype_template = "void initialize_${configuration_name}(Manifest &manifest);\n"
self.configuration_template = " initialize_${configuration_name}(manifest);\n"
self.subclass_call_template = " initialize_all_sm${min_cc}_${subclass_name}_${operation_name}_operations(manifest);\n"
self.subclass_prototype_template = "void initialize_all_sm${min_cc}_${subclass_name}_${operation_name}_operations(Manifest &manifest);\n"
self.epilogue_template ="""}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
"""
#
def __enter__(self):
_LOGGER.debug("*** EmitOperationKindLibrary::__enter__")
_LOGGER.debug(f"*** generated_path: {str(self.generated_path)}")
_LOGGER.debug(f"*** OperationKindNames[kind]: {OperationKindNames[self.kind]}")
_LOGGER.debug(f"*** min_cc: {self.min_cc}")
self.operation_path = os.path.join(self.generated_path, OperationKindNames[self.kind], str(self.min_cc))
_LOGGER.debug(f"*** operation_path (directory to make): {str(self.operation_path)}")
os.makedirs(self.operation_path)
self.top_level_path = os.path.join(self.operation_path, f"all_sm{self.min_cc}_{OperationKindNames[self.kind]}_operations.cu")
_LOGGER.debug(f"*** top_level_path (file to write): {str(self.top_level_path)}")
self.top_level_file = open(self.top_level_path, "w")
self.top_level_file.write(self.header_template)
self.source_files = {}
# Each {operation_kind x cc} combination is further decomposed by the instruction
# types used. This dictionary used to track the file handles for the top-level
# files of each subclass
self.subclass_files = {}
# Configurations in each sub class
self.subclass_configurations = {}
return self
#
def emit(self, configuration_name, operations):
_LOGGER.debug("*** EmitOperationKindLibrary::emit")
_LOGGER.debug(f"*** configuration_name: {configuration_name}")
assert len(operations) > 0
# The extended name for all operations of a given configuration_name is guaranteed
# to be the same because extended_name() is used in defining configuration_name. Thus,
# we can safely use the extended_name() of the first operation.
extended_name = operations[0].extended_name()
_LOGGER.debug('*** extended_name (for all ops): ' + extended_name)
# Create a directory for operations with this subclass if it does not exist
if extended_name not in self.subclass_files:
subclass_path = os.path.join(self.operation_path, extended_name)
_LOGGER.debug(f"*** subclass_path: {str(subclass_path)}")
os.mkdir(subclass_path)
self.subclass_configurations[extended_name] = []
# Open a new top-level file for this sub class
subclass_top_level_path = os.path.join(
subclass_path, f"all_sm{self.min_cc}_{extended_name}_{OperationKindNames[self.kind]}_operations.cu")
_LOGGER.debug('*** subclass_top_level_path (min_cc, extended_name, ' +
'OperationKind): ' + str(subclass_top_level_path))
self.subclass_files[extended_name] = open(subclass_top_level_path, "w")
self.subclass_files[extended_name].write(self.header_template)
self.source_files[extended_name] = [subclass_top_level_path]
subclass_dir = os.path.dirname(self.subclass_files[extended_name].name)
_LOGGER.debug('*** subclass_dir: ' + str(subclass_dir))
with self.emitters[self.kind](subclass_dir, configuration_name) as configuration_emitter:
for operation in operations:
configuration_emitter.emit(operation)
_LOGGER.debug('*** configuration_emitter.configuration_path: ' +
str(configuration_emitter.configuration_path))
self.source_files[extended_name].append(configuration_emitter.configuration_path)
self.subclass_configurations[extended_name].append(configuration_name)
self.subclass_files[extended_name].write(SubstituteTemplate(self.configuration_prototype_template, {'configuration_name': configuration_name} ))
#
def __exit__(self, exception_type, exception_value, traceback):
_LOGGER.debug("*** EmitOperationKindLibrary::__exit__")
for subclass_name, subclass_file in sorted(self.subclass_files.items()):
subclass_cfg = {
'min_cc': str(self.min_cc),
'subclass_name': subclass_name,
'operation_name': OperationKindNames[self.kind]
}
self.top_level_file.write(SubstituteTemplate(self.subclass_prototype_template, subclass_cfg))
self.top_level_file.write(
SubstituteTemplate(self.entry_template, {
'min_cc': str(self.min_cc),
'subclass_name': '',
'operation_name': OperationKindNames[self.kind]
}))
# Finish and close all subclass files
for subclass_name, subclass_file in sorted(self.subclass_files.items()):
subclass_cfg = {
'min_cc': str(self.min_cc),
'subclass_name': subclass_name,
'operation_name': OperationKindNames[self.kind]
}
subclass_file.write(SubstituteTemplate(self.entry_template, subclass_cfg))
for configuration in self.subclass_configurations[subclass_name]:
subclass_file.write(
SubstituteTemplate(self.configuration_template, {
'configuration_name': configuration
}))
subclass_file.write(self.epilogue_template)
subclass_file.close()
# Write the call to initialize_all for this subclass to the top-level file
self.top_level_file.write(SubstituteTemplate(self.subclass_call_template, subclass_cfg))
self.top_level_file.write(self.epilogue_template)
self.top_level_file.close()
class EmitInterfaceLibrary:
"""
Emit the topmost-level CUTLASS library initialization code.
The code is generated in the generated_path directory
(e.g., tools/library/generated in the build directory),
in the initialize_all.cpp file.
That file declares several functions in namespace cutlass::library.
The functions all have this form,
void initialize_all_{operation_kind}_operations(Manifest& manifest);
where {operation_kind} abbreviates the "kind" of operation
(e.g., gemm for matrix-matrix multiply, conv2d for 2-d convolution,
or trmm for triangular solve with multiple right-hand sides).
The definitions of these functions live in subdirectories.
The file also _defines_ the following function in that namespace.
void initialize_all(Manifest& manifest);
That function first prepares the manifest, and then
calls all of the functions declared in this file.
"""
def __init__(self, generated_path, operation_count, args):
self.generated_path = generated_path
self.args = args
self.prototypes = []
self.fn_calls = []
self.operation_count = str(operation_count)
self.top_level_hdr_template = '''
/*
Generated by manifest.py - Do not edit.
*/
'''
self.top_level_prologue = '''
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
namespace cutlass {
\tnamespace library {
${prototypes}
'''
self.top_level_initialize_kind = '''
\t\tvoid initialize_all_${kind}_operations(Manifest &manifest) {
${fn_calls}
\t\t}
'''
self.top_level_initialize = '''
\t\tvoid initialize_all(Manifest &manifest) {
\t\t\tmanifest.reserve(${operation_count});\n
${fn_calls}
\t\t}
'''
self.top_level_suffix = '''
\t} // namespace library
} // namespace cutlass
'''
#
def __enter__(self):
_LOGGER.debug("*** EmitInterfaceLibrary::__enter__")
self.top_level_path = os.path.join(self.generated_path, 'initialize_all.cpp')
_LOGGER.debug("*** top_level_path: " + str(self.top_level_path))
self.top_level_file = open(self.top_level_path, "w")
self.top_level_file.write(self.top_level_hdr_template)
self.source_files = [self.top_level_path,]
return self
#
def emit(self, operation_name):
_LOGGER.debug("*** EmitInterfaceLibrary::emit")
_LOGGER.debug("*** operation_name: " + operation_name)
self.prototypes.append(SubstituteTemplate(
"\t\tvoid initialize_all_${operation_kind}_operations(Manifest &manifest);",
{'operation_kind': operation_name}))
self.fn_calls.append(SubstituteTemplate(
"\t\t\tinitialize_all_${operation_kind}_operations(manifest);",
{'operation_kind': operation_name}))
#
def __exit__(self, exception_type, exception_value, traceback):
_LOGGER.debug("*** EmitInterfaceLibrary::__exit__")
self.top_level_file.write(SubstituteTemplate(self.top_level_prologue, {'prototypes':"\n".join(self.prototypes)}))
# Write out initialize_all method
self.top_level_file.write(SubstituteTemplate(self.top_level_initialize,
{'operation_count': self.operation_count, 'fn_calls':"\n".join(self.fn_calls)}))
self.top_level_file.write(self.top_level_suffix)
self.top_level_file.close()
###################################################################################################
###################################################################################################
class Options:
def __init__(self):
pass
###################################################################################################
#
class Manifest:
#
def __init__(self, args = None):
self.operations = {}
self.args = args
self.operation_count = 0
self.operations_by_name = {}
self.kernel_filter = ''
self.kernel_filter_list = []
self.kernel_names = []
self.operations_enabled = []
self.selected_kernels = []
self.ignore_kernel_names = []
self.exclude_kernel_names = []
self.compute_capabilities = [50,]
self.curr_build_dir = '.'
self.filter_by_cc = True
if self.args:
self.kernel_filter = self.args.kernels
self.curr_build_dir = args.curr_build_dir
# A common user error is to use commas instead of semicolons.
if ',' in args.architectures:
raise RuntimeError("The list of architectures (CMake option CUTLASS_NVCC_ARCHS) must be semicolon-delimited.\nDon't use commas to separate the architectures; use semicolons.\nYou specified the list as: " + args.architectures)
architectures = args.architectures.split(';') if len(args.architectures) else ['50',]
arch_conditional_cc = ['90a']
architectures = [x if x not in arch_conditional_cc else x.split('a')[0] for x in architectures]
self.compute_capabilities = [int(x) for x in architectures]
if args.filter_by_cc in ['false', 'False', '0']:
self.filter_by_cc = False
if args.operations == 'all':
self.operations_enabled = []
else:
operations_list = [
OperationKind.Gemm
, OperationKind.Conv2d
, OperationKind.Conv3d
, OperationKind.RankK
, OperationKind.Trmm
, OperationKind.Symm
]
self.operations_enabled = [x for x in operations_list if OperationKindNames[x] in args.operations.split(',')]
if args.kernels == 'all':
self.kernel_names = []
else:
self.kernel_names = [x for x in args.kernels.split(',') if x != '']
self.ignore_kernel_names = [x for x in args.ignore_kernels.split(',') if x != '']
self.exclude_kernel_names = [x for x in args.exclude_kernels.split(',') if x != '']
if args.kernel_filter_file is None:
self.kernel_filter_list = []
else:
self.kernel_filter_list = self.get_kernel_filters(args.kernel_filter_file)
_LOGGER.debug("Using {filter_count} kernel filters from {filter_file}".format(
filter_count = len(self.kernel_filter_list),
filter_file = args.kernel_filter_file))
self.operation_count = 0
self.operations_by_name = {}
self.disable_full_archs_compilation = args.disable_full_archs_compilation
def get_kernel_filters (self, kernelListFile):
if os.path.isfile(kernelListFile):
with open(kernelListFile, 'r') as fileReader:
lines = [line.rstrip() for line in fileReader if not line.startswith("#")]
lines = [re.compile(line) for line in lines if line]
return lines
else:
return []
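  # Example contents of a kernel filter file accepted by the method above (each
  # non-comment line is compiled as a regular expression; the patterns here are
  # illustrative only):
  #
  #   # lines starting with '#' are ignored
  #   .*tensorop.*f16.*
  #   ^cutlass_simt_sgemm_128x128.*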
#
def filter_out_kernels(self, kernel_name, kernel_filter_list):
for kernel_filter_re in kernel_filter_list:
if kernel_filter_re.search(kernel_name) is not None:
return True
return False
#
def _filter_string_matches(self, filter_string, haystack):
''' Returns true if all substrings appear in the haystack in order'''
substrings = filter_string.split('*')
for sub in substrings:
idx = haystack.find(sub)
if idx < 0:
return False
haystack = haystack[idx + len(sub):]
return True
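  # For example, with the method above the filter string "s16816*f16*align8" matches any
  # kernel name in which "s16816", "f16", and "align8" appear in that order, such as
  # (hypothetically) "cutlass_tensorop_s16816gemm_f16_128x128_64x4_nn_align8".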
#
def filter(self, operation):
''' Filtering operations based on various criteria'''
# filter based on compute capability
enabled = not (self.filter_by_cc)
for cc in self.compute_capabilities:
if cc >= operation.tile_description.minimum_compute_capability and \
cc <= operation.tile_description.maximum_compute_capability and \
(cc not in SharedMemPerCC or SharedMemPerCC[cc] >= CalculateSmemUsage(operation)):
enabled = True
break
if not enabled:
return False
if len(self.operations_enabled) and not operation.operation_kind in self.operations_enabled:
return False
name = operation.procedural_name()
# eliminate duplicates
if name in self.operations_by_name.keys():
return False
# Filter based on list of valid substrings
if len(self.kernel_names):
enabled = False
# compare against the include list
for name_substr in self.kernel_names:
if self._filter_string_matches(name_substr, name):
_LOGGER.debug(f"Kernel {name} included due to filter string '{name_substr}'.")
enabled = True
break
else:
_LOGGER.debug(f"Kernel {name} NOT included due to not matching '{name_substr}'.")
# compare against the exclude list
for name_substr in self.ignore_kernel_names:
if self._filter_string_matches(name_substr, name):
_LOGGER.debug(f"Kernel {name} ignored due to filter string '{name_substr}'.")
enabled = False
break
else:
_LOGGER.debug(f"Kernel {name} NOT ignored due to not matching '{name_substr}'.")
if len(self.kernel_filter_list) > 0:
if self.filter_out_kernels(name, self.kernel_filter_list):
_LOGGER.debug(f"Kernel {name} matched via kernel filter file.")
enabled = True
else:
_LOGGER.debug(f"Kernel {name} culled due to no match in kernel filter file.")
enabled = False
# CUTLASS_LIBRARY_IGNORE_KERNELS ("ignore" list) only takes effect
# if CUTLASS_LIBRARY_KERNELS was specified.
# Changing that would break backwards compatibility.
# Thus, CUTLASS has introduced the new CMake option CUTLASS_LIBRARY_EXCLUDE_KERNELS,
# that always takes effect, whether or not CUTLASS_LIBRARY_KERNELS was specified.
for name_substr in self.exclude_kernel_names:
if self._filter_string_matches(name_substr, name):
_LOGGER.debug(f"Kernel {name} excluded due to filter string '{name_substr}'.")
enabled = False
break
else:
_LOGGER.debug(f"Kernel {name} NOT excluded due to not matching '{name_substr}'.")
# TODO: filter based on compute data type
return enabled
#
#
def append(self, operation):
'''
Inserts the operation.
    operation_kind -> min_cc -> configuration_name -> []
'''
if self.filter(operation):
self.selected_kernels.append(operation.procedural_name())
self.operations_by_name[operation.procedural_name()] = operation
# add the configuration
configuration_name = operation.configuration_name()
# Split operations by minimum CC
min_cc = operation.arch
if operation.operation_kind not in self.operations.keys():
self.operations[operation.operation_kind] = {}
if min_cc not in self.operations[operation.operation_kind]:
self.operations[operation.operation_kind][min_cc] = {}
if configuration_name not in self.operations[operation.operation_kind][min_cc].keys():
self.operations[operation.operation_kind][min_cc][configuration_name] = []
self.operations[operation.operation_kind][min_cc][configuration_name].append(operation)
self.operation_count += 1
else:
_LOGGER.debug("Culled {} from manifest".format(operation.procedural_name()))
#
def emit_manifest_cmake(self, manifest_path, top_level_path, source_files):
with open(manifest_path, "w") as manifest_file:
target_text = SubstituteTemplate("""cutlass_target_sources(cutlass_library_objs PRIVATE
""", { })
manifest_file.write(target_text + '\n\n')
manifest_file.write(" %s\n" % str(top_level_path.replace('\\', '/')))
generated_path = os.path.join(self.curr_build_dir, 'generated')
for kind in self.operations.keys():
kind_str = OperationKindNames[kind]
all_kind_file = os.path.join(generated_path, kind_str, f"all_{kind_str}_operations.cu").replace('\\', '/')
manifest_file.write(f" {all_kind_file}\n")
manifest_file.write(')\n\n')
for kind in self.operations.keys():
for min_cc in sorted(self.operations[kind].keys()):
for subclass in sorted(source_files[kind][min_cc].keys()):
target_text = SubstituteTemplate("""cutlass_add_cutlass_library(
SUFFIX ${kind}_sm${min_cc}_${subclass}
""", { 'min_cc': str(min_cc), 'kind': OperationKindNames[kind], 'subclass': subclass })
manifest_file.write(target_text + '\n\n')
for source_file in source_files[kind][min_cc][subclass]:
manifest_file.write(" %s\n" % str(source_file.replace('\\', '/')))
manifest_file.write(")\n")
if self.disable_full_archs_compilation:
self.emit_disable_full_archs_compilation(manifest_file, source_files)
  def emit_disable_full_archs_compilation(self, manifest_file, source_files):
def for_hopper(name):
pass
def for_ampere(name):
return "16816" in name or \
"16832" in name or \
"16864" in name or \
("1688" in name and "tf32" in name)
def for_turing(name):
return ("1688" in name and "tf32" not in name) or \
"8816" in name
def for_volta(name):
return "884" in name
def is_cpp(name):
return name.endswith(".cpp")
def get_src_archs_str_given_requested_cuda_archs(archs, source_file):
intersected_archs = archs & set(self.compute_capabilities)
if intersected_archs == set():
raise RuntimeError(
"""
Empty archs set for file {} after taking
the intersection of {} (global requested archs) and
{} (per file requested archs)
""".format(source_file, set(self.compute_capabilities), archs))
else:
return " ".join(map(str, intersected_archs))
for min_cc in sorted(source_files.keys()):
for source_file in source_files[min_cc]:
if is_cpp(source_file):
continue # skip because source is cpp
elif for_ampere(source_file):
archs_str = get_src_archs_str_given_requested_cuda_archs({80, 87, 90}, source_file)
elif for_turing(source_file):
archs_str = get_src_archs_str_given_requested_cuda_archs({75}, source_file)
elif for_volta(source_file):
archs_str = get_src_archs_str_given_requested_cuda_archs({70, 72}, source_file)
else:
raise RuntimeError("Per file archs are not set {}, as there is no rule specified for this file pattern".format(source_file))
manifest_file.write("cutlass_apply_cuda_gencode_flags({} SM_ARCHS {})\n".format(str(source_file.replace('\\', '/')), archs_str))
#
def emit(self, target = GeneratorTarget.Library):
operation_emitters = {
GeneratorTarget.Library: EmitOperationKindLibrary
}
# Emitters for all operations that fall under a particular kind (e.g., GEMM, Conv2d)
kind_emitters = {
GeneratorTarget.Library: EmitOperationKindAll
}
interface_emitters = {
GeneratorTarget.Library: EmitInterfaceLibrary
}
generated_path = os.path.join(self.curr_build_dir, 'generated')
# create generated/
if os.path.exists(generated_path):
shutil.rmtree(generated_path)
os.mkdir(generated_path)
with interface_emitters[target](generated_path, self.operation_count, self.args) as iface_emitter:
top_level_path = iface_emitter.top_level_path
for operation_kind in self.operations.keys():
iface_emitter.emit(OperationKindNames[operation_kind])
source_files = {}
for kind in self.operations.keys():
source_files[kind] = {}
for min_cc in self.operations[kind].keys():
source_files[kind][min_cc] = {}
for operation_kind, ops in self.operations.items():
for min_cc, configurations in sorted(ops.items()):
with operation_emitters[target](generated_path, min_cc, operation_kind, self.args) as operation_kind_emitter:
for configuration_name, operations in configurations.items():
_LOGGER.info(f"Emitting {configuration_name} with {len(operations)} operation{'' if len(operations) == 1 else 's'}.")
operation_kind_emitter.emit(configuration_name, operations)
for subclass, files in operation_kind_emitter.source_files.items():
if subclass not in source_files[operation_kind][min_cc]:
source_files[operation_kind][min_cc][subclass] = []
source_files[operation_kind][min_cc][subclass].extend(operation_kind_emitter.source_files[subclass])
# Emit top level all_{gemm, conv2d, ...}_operations.cu files
with kind_emitters[target](generated_path, operation_kind, self.args) as operation_kind_emitter:
operation_kind_emitter.emit(ops)
# write the manifest.cmake file containing paths from all targets
manifest_path = os.path.join(generated_path, "manifest.cmake")
self.emit_manifest_cmake(manifest_path, top_level_path, source_files)
###################################################################################################
| python/cutlass_library/manifest.py/0 | {
"file_path": "python/cutlass_library/manifest.py",
"repo_id": "python",
"token_count": 11411
} | 44 |
{
"path": "./../../../../examples/python/00_basic_gemm.ipynb"
}
| python/docs_src/source/externals/00_basic_gemm.nblink/0 | {
"file_path": "python/docs_src/source/externals/00_basic_gemm.nblink",
"repo_id": "python",
"token_count": 31
} | 45 |
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for defining Conv2D problem sizes for testing.
This file was ported from the C++ version in test/unit/conv/device/conv2d_problems.h
"""
from cutlass_library import ConvMode
import cutlass
from cutlass.shape import Conv2DProblemSize
class TestbedConv2dProblemSizes:
def __init__(self, minimum_channel_size: int):
conv2d_default_sizes = self.initialize_conv2d_default_sizes(minimum_channel_size)
conv2d_rigorous_sizes = self.initialize_conv2d_rigorous_sizes(minimum_channel_size)
conv2d_resnet50_sizes = self.initialize_conv2d_resnet50_sizes(1)
conv2d_resnet50_sizes_perf = self.initialize_conv2d_resnet50_sizes(34)
grouped_sizes = self.initialize_conv2d_grouped_sizes()
# Keep only problems whose per-group channel count (C // groups) is a multiple of minimum_channel_size
self.all = []
for size_list in [conv2d_default_sizes, conv2d_rigorous_sizes, conv2d_resnet50_sizes, conv2d_resnet50_sizes_perf, grouped_sizes]:
for size in size_list:
if (size.C // size.groups) % minimum_channel_size == 0:
self.all.append(size)
def initialize_conv2d_default_sizes(self, minimum_channel_size):
# Small input size x stride (1,1)
# C < CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
conv2d_default_sizes = []
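# The positional arguments to Conv2DProblemSize below appear to follow the pattern used
# throughout this file:
#   N, H, W, C          (activation extent)
#   K, R, S, C          (filter extent; C is the per-group channel count for grouped conv)
#   pad_h, pad_w
#   stride_h, stride_w
#   dilation_h, dilation_w
#   [mode, split_k_slices, groups]   (optional; see the grouped sizes below)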
conv2d_default_sizes.append(Conv2DProblemSize(
1, 1, 1, minimum_channel_size,
8, 1, 1, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 1, 8, minimum_channel_size,
8, 1, 3, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 7, 8, minimum_channel_size,
8, 3, 3, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 7, 9, minimum_channel_size,
8, 4, 4, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
2, 7, 9, minimum_channel_size,
8, 5, 5, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
3, 7, 9, minimum_channel_size,
8, 6, 5, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
3, 7, 9, minimum_channel_size,
8, 6, 6, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
3, 7, 9, minimum_channel_size,
8, 7, 7, minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
##############################################
# Small input size x stride (2,2)
# C < CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
##############################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 11, 7, minimum_channel_size,
8, 1, 1, minimum_channel_size,
0, 0,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 11, 7, minimum_channel_size,
8, 3, 3, minimum_channel_size,
1, 1,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 13, 11, minimum_channel_size,
8, 1, 1, minimum_channel_size,
1, 1,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 17, 19, minimum_channel_size,
16, 2, 2, minimum_channel_size,
1, 1,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 23, 5, minimum_channel_size,
16, 3, 3, minimum_channel_size,
1, 1,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 13, 17, 8,
24, 3, 3, 8,
0, 0,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 23, 21, 8,
24, 3, 3, 8,
1, 1,
3, 3,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 20, 24, 8,
40, 3, 3, 8,
3, 3,
3, 3,
1, 1,
))
##########################################
# Medium input sizes, filter sizes (1x1, 2x3, 3x3, 5x5)
##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 15, 19, 160,
224, 1, 1, 160,
0, 0,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 19, 37, 160,
224, 3, 3, 160,
1, 1,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 16, 16, 160,
224, 2, 3, 160,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 23, 21, 128,
224, 3, 3, 128,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 29, 37, 160,
224, 5, 5, 160,
2, 2,
1, 1,
1, 1,
))
##########################################
# C > CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64}
##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 15, 19, 32 + minimum_channel_size,
96, 3, 3, 32 + minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 16, 24, 64 + minimum_channel_size,
96, 3, 3, 64 + minimum_channel_size,
1, 1,
1, 1,
1, 1,
))
##########################################
# Medium input size, filter size (1x1, 3x3, 5x5, 7x7), stride (2, 2)
##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 13, 16, 288,
160, 5, 5, 288,
2, 2,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 55, 51, 256,
512, 1, 1, 256,
0, 0,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 71, 80, 32,
64, 5, 5, 32,
2, 2,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 224, 224, 8,
64, 7, 7, 8,
3, 3,
2, 2,
1, 1,
))
##########################################
# Medium input size stride (3, 3), filter (3, 3), non-default padding
##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 27, 23, 256,
512, 3, 3, 256,
0, 0,
3, 3,
1, 1,
))
##########################################
# Medium input size padding > stride, asymmetric filter, padding and striding
##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 27, 31, 256,
512, 3, 3, 256,
5, 7,
3, 4,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 27, 35, 256,
512, 7, 5, 256,
11, 7,
3, 5,
1, 1,
))
##########################################
# Medium input size *mixed* stride (1, 2) and (2, 1),
# filter (3, 3), default padding
##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
1, 27, 27, 256,
512, 3, 3, 256,
1, 1,
1, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 27, 27, 256,
512, 3, 3, 256,
1, 1,
2, 1,
1, 1,
))
##########################################
# Additional input size
##########################################
conv2d_default_sizes.append(Conv2DProblemSize(
3, 28, 28, 256,
256, 2, 2, 256,
0, 0,
2, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
1, 32, 32, 16,
32, 3, 3, 16,
1, 1,
6, 2,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
32, 24, 32, 32,
32, 1, 2, 32,
0, 0,
1, 1,
1, 1,
))
conv2d_default_sizes.append(Conv2DProblemSize(
4, 2, 3, 256,
328, 3, 5, 256,
1, 1,
1, 1,
1, 1,
))
return conv2d_default_sizes
# Add a few large and rigorous convolution problem sizes
def initialize_conv2d_rigorous_sizes(self, minimum_channel_size):
sizes = []
if False:  # Disabled by default; these large, rigorous problem sizes are expensive to run
sizes.append(Conv2DProblemSize.from_sizes(
(1, 124, 224, 2 * minimum_channel_size),
(24, 7, 7, 2 * minimum_channel_size),
))
sizes.append(Conv2DProblemSize.from_sizes(
(1, 233, 35, minimum_channel_size),
(24, 7, 5, minimum_channel_size),
))
return sizes
# Add resnet50 layers to unit testing sizes
def initialize_conv2d_resnet50_sizes(self, batch_size):
conv2d_problem_vector = []
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 56, 56, 64,
256, 1, 1, 64,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 56, 56, 64,
64, 1, 1, 64,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 56, 56, 64,
64, 3, 3, 64,
1, 1,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 56, 56, 256,
64, 1, 1, 256,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 56, 56, 256,
512, 1, 1, 256,
0, 0,
2, 2,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 56, 56, 256,
128, 1, 1, 256,
0, 0,
2, 2,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 28, 28, 128,
128, 3, 3, 128,
1, 1,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 28, 28, 128,
512, 1, 1, 128,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 28, 28, 512,
128, 1, 1, 512,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 28, 28, 512,
1024, 1, 1, 512,
0, 0,
2, 2,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 28, 28, 512,
256, 1, 1, 512,
0, 0,
2, 2,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 14, 14, 256,
256, 3, 3, 256,
1, 1,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 14, 14, 256,
1024, 1, 1, 256,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 14, 14, 1024,
256, 1, 1, 1024,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 14, 14, 1024,
2048, 1, 1, 1024,
0, 0,
2, 2,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 14, 14, 1024,
512, 1, 1, 1024,
0, 0,
2, 2,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 7, 7, 512,
512, 3, 3, 512,
1, 1,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 7, 7, 512,
2048, 1, 1, 512,
0, 0,
1, 1,
1, 1,
))
conv2d_problem_vector.append(Conv2DProblemSize(
batch_size, 7, 7, 2048,
512, 1, 1, 2048,
0, 0,
1, 1,
1, 1,
))
return conv2d_problem_vector
def initialize_conv2d_grouped_sizes(self):
threadblock_n = 128
threadblock_k = 32
sizes = []
##########################################
# One group calculated by one or multiple CTAs: k_per_group % CTA::N = 0
# One CTA calculates a single group
##########################################
for cta_per_group_k in range(1, 4):
for groups in range(2, 5):
conv_k = cta_per_group_k * threadblock_n * groups
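# Each group contributes k_per_group = cta_per_group_k * threadblock_n output channels,
# so every group spans a whole number of CTA tiles along N.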
sizes.append(Conv2DProblemSize(
1, 8, 8, threadblock_k * 2 * groups,
conv_k, 3, 3, threadblock_k * 2,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
groups
))
# Partial gemm_k: k_per_group == CTA::N && channels_per_group < CTA::K
sizes.append(Conv2DProblemSize(
1, 8, 8, threadblock_k,
threadblock_n * 2, 3, 3, threadblock_k // 2,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
2
))
sizes.append(Conv2DProblemSize(
1, 56, 56, 696,
768, 3, 3, 232,
1, 1,
2, 2,
1, 1,
ConvMode.CrossCorrelation,
1,
3
))
sizes.append(Conv2DProblemSize(
1, 14, 14, 1392,
1536, 3, 3, 232,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
3
))
##########################################
# One CTA calculates multiple groups: CTA::N % k_per_group = 0
##########################################
# 2 groups per CTA
sizes.append(Conv2DProblemSize(
1, 8, 8, threadblock_k * 4,
threadblock_n, 3, 3, threadblock_k * 2,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
2
))
# 2 groups per CTA and partial gemm_k
sizes.append(Conv2DProblemSize(
1, 8, 8, threadblock_k,
threadblock_n, 3, 3, threadblock_k // 2,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
2
))
# 4 groups per CTA
sizes.append(Conv2DProblemSize(
1, 8, 8, threadblock_k * 8,
threadblock_n // 2, 3, 3, threadblock_k * 2,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
4
))
# 4 groups per CTA and partial gemm_k
sizes.append(Conv2DProblemSize(
1, 8, 8, threadblock_k * 2,
threadblock_n // 2, 3, 3, threadblock_k // 2,
1, 1,
1, 1,
1, 1,
ConvMode.CrossCorrelation,
1,
4
))
return sizes
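# Example usage (a minimal sketch; `run_conv2d_test` is a hypothetical consumer, not part
# of this module):
#
#   problem_sizes = TestbedConv2dProblemSizes(minimum_channel_size=8)
#   for problem in problem_sizes.all:
#       run_conv2d_test(problem)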
| test/python/cutlass/conv2d/conv2d_problem_sizes.py/0 | {
"file_path": "test/python/cutlass/conv2d/conv2d_problem_sizes.py",
"repo_id": "test",
"token_count": 9922
} | 46 |
/***************************************************************************************************
* Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Testbed for running device-level Conv2Ds with absolute maximum calculation and scaling
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "conv2d_problems.h"
#include "../../common/cutlass_unit_test.h"
#include "../../gemm/device/testbed_utils.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_reduce.h"
namespace test {
namespace conv {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Conv,
template<typename T> class ActivationFunctor
>
struct TestbedConv2dWithAbsMax {
using ElementAccumulator = typename Conv::ElementAccumulator;
using ElementCompute = typename Conv::UnderlyingKernel::Epilogue::OutputOp::ElementCompute;
using ElementScalingFactor = typename Conv::EpilogueOutputOp::ElementScalingFactor;
using ElementAbsmax = typename Conv::EpilogueOutputOp::ElementAbsmax;
static cutlass::conv::Operator const kConvolutionalOperator = Conv::kConvolutionalOperator;
static bool const kScaleAux = Conv::EpilogueOutputOp::kIsScalingAndAmaxAuxOutputNeeded;
static bool const kScaleOutput = Conv::EpilogueOutputOp::kIsScalingAndAmaxOutputNeeded;
bool doScaleA;
bool doScaleB;
bool doScaleC;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<typename Conv::ElementA, typename Conv::LayoutA> tensor_A;
cutlass::HostTensor<typename Conv::ElementB, typename Conv::LayoutB> tensor_B;
cutlass::HostTensor<typename Conv::ElementC, typename Conv::LayoutC> tensor_C;
cutlass::HostTensor<typename Conv::EpilogueOutputOp::ElementAuxOutput, typename Conv::LayoutC> tensor_Aux;
cutlass::HostTensor<typename Conv::EpilogueOutputOp::ElementOutput, typename Conv::LayoutC> tensor_D;
cutlass::HostTensor<typename Conv::ElementC, typename Conv::LayoutC> tensor_Vector;
cutlass::HostTensor<ElementAccumulator, typename Conv::LayoutC> tmp_D;
cutlass::HostTensor<typename Conv::EpilogueOutputOp::ElementOutput, typename Conv::LayoutC> reference_D;
cutlass::HostTensor<typename Conv::EpilogueOutputOp::ElementAuxOutput, typename Conv::LayoutC> reference_Aux;
cutlass::HostTensor<ElementScalingFactor, typename Conv::LayoutC> scale_A;
cutlass::HostTensor<ElementScalingFactor, typename Conv::LayoutC> scale_B;
cutlass::HostTensor<ElementScalingFactor, typename Conv::LayoutC> scale_C;
cutlass::HostTensor<ElementScalingFactor, typename Conv::LayoutC> scale_D;
cutlass::HostTensor<ElementScalingFactor, typename Conv::LayoutC> scale_Aux;
cutlass::HostTensor<ElementAbsmax, typename Conv::LayoutC> abs_max_Aux;
cutlass::HostTensor<ElementAbsmax, typename Conv::LayoutC> abs_max_D;
cutlass::HostTensor<ElementAbsmax, typename Conv::LayoutC> reference_abs_max_Aux;
cutlass::HostTensor<ElementAbsmax, typename Conv::LayoutC> reference_abs_max_D;
//
// Methods
//
TestbedConv2dWithAbsMax(
bool scaleA = true,
bool scaleB = true,
bool scaleC = true,
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
doScaleA(scaleA), doScaleB(scaleB), doScaleC(scaleC),
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize scaling factors
template <typename Element, typename Layout>
bool initialize_scale_factor(cutlass::TensorView<Element, Layout> view, uint64_t seed, int bits=0) {
cutlass::reference::host::TensorFillRandomUniform(view, seed, double(1.), double(0.), bits);
return true;
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Conv::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::conv::Conv2dProblemSize const &problem_size) {
//
// Allocate the implicit GEMM (convolution) workspace
//
tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size));
tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size));
tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_D.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
tensor_Vector.resize({1, 1, 1, implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size).c()});
reference_D.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size), false);
tmp_D.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size), false);
EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017));
EXPECT_TRUE(initialize_tensor(tensor_Vector.host_view(), init_C, seed + 2020));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
cutlass::Coord<4> origin(0);
tensor_A.host_view().at(origin) = typename Conv::ElementA(1);
tensor_B.host_view().at(origin) = typename Conv::ElementB(1);
tensor_C.host_view().at(origin) = typename Conv::ElementC(1);
tensor_Vector.host_view().at(origin) = typename Conv::ElementC(1);
cutlass::reference::host::TensorFill(tensor_D.host_view());
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
tensor_Vector.sync_device();
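// The scale factors below are drawn with only a couple of random bits (scale_bits = 2),
// presumably so that scaled reference and device computations agree exactly.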
int scale_bits = 2;
if (doScaleA) {
scale_A.resize({1, 1, 1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_A.host_view(), seed + 2021, scale_bits));
scale_A.sync_device();
}
if (doScaleB) {
scale_B.resize({1, 1, 1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_B.host_view(), seed + 2022, scale_bits));
scale_B.sync_device();
}
if (doScaleC) {
scale_C.resize({1, 1, 1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_C.host_view(), seed + 2023, scale_bits));
scale_C.sync_device();
}
if (kScaleOutput) {
scale_D.resize({1, 1, 1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_D.host_view(), seed + 2024, scale_bits));
scale_D.sync_device();
abs_max_D.resize({1, 1, 1, 1});
cutlass::reference::host::TensorFill(abs_max_D.host_view());
abs_max_D.sync_device();
reference_abs_max_D.resize({1, 1, 1, 1});
}
if (kScaleAux) {
tensor_Aux.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size));
cutlass::reference::host::TensorFill(tensor_Aux.host_view());
tensor_Aux.sync_device();
scale_Aux.resize({1, 1, 1, 1});
EXPECT_TRUE(initialize_scale_factor(scale_Aux.host_view(), seed + 2025, scale_bits));
scale_Aux.sync_device();
abs_max_Aux.resize({1, 1, 1, 1});
cutlass::reference::host::TensorFill(abs_max_Aux.host_view());
abs_max_Aux.sync_device();
reference_Aux.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size), false);
reference_abs_max_Aux.resize({1, 1, 1, 1});
}
}
/// Compares computed reference with device reference and outputs to a file if incorrect
bool compare_reference(
cutlass::conv::Conv2dProblemSize const &problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(reference_D.host_view(), tensor_D.host_view());
if (kScaleAux) {
tensor_Aux.sync_host();
abs_max_Aux.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_Aux.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(abs_max_Aux.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_Aux.host_view()), 0);
passed &= cutlass::reference::host::TensorEquals(reference_Aux.host_view(), tensor_Aux.host_view());
passed &= cutlass::reference::host::TensorEquals(abs_max_Aux.host_view(), reference_abs_max_Aux.host_view());
}
if (kScaleOutput) {
abs_max_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(abs_max_D.host_view()), 0);
passed &= cutlass::reference::host::TensorEquals(abs_max_D.host_view(), reference_abs_max_D.host_view());
}
EXPECT_TRUE(passed) << " mismatched reference";
if (!passed) {
std::ofstream file0("conv_testbed_with_amax_errors_reference.txt");
std::ofstream file1("conv_testbed_with_amax_errors_computed.txt");
std::ofstream file("conv_testbed_with_amax_errors.txt");
file
<< "problem: " << problem_size
<< ", alpha: " << alpha << ", beta: " << beta << "\n\n";
file
<< "A =\n" << tensor_A.host_view()
<< "\nB =\n" << tensor_B.host_view()
<< "\nC =\n" << tensor_C.host_view()
<< "\nVector =\n" << tensor_Vector.host_view()
<< "\nScaleA = " << scale_A.host_view()
<< "\nScaleB = " << scale_B.host_view()
<< "\nScaleC = " << scale_C.host_view()
<< "\nScaleD = " << scale_D.host_view()
<< "\nScaleAux = " << scale_Aux.host_view()
<< std::endl;
file0 << "\n\nReference D =\n" << reference_D.host_view() << std::endl;
file1 << "\n\nComputed D =\n" << tensor_D.host_view() << std::endl;
if (kScaleAux) {
file0 << "\n\nReference Aux =\n" << reference_Aux.host_view() << std::endl;
file1 << "\n\nComputed Aux =\n" << tensor_Aux.host_view() << std::endl;
file0 << "\n\nReference Absmax Aux = " << reference_abs_max_Aux.host_view() << std::endl;
file1 << "\n\nComputed Absmax Aux = " << abs_max_Aux.host_view() << std::endl;
}
if (kScaleOutput) {
file0 << "\n\nReference Absmax D = " << reference_abs_max_D.host_view() << std::endl;
file1 << "\n\nComputed Absmax D = " << abs_max_D.host_view() << std::endl;
}
}
return passed;
}
/// Verifies the device result against a host-side reference convolution
bool verify(
cutlass::conv::Conv2dProblemSize const &problem_size,
ElementCompute alpha,
ElementCompute beta) {
cutlass::Coord<4> origin(0);
ElementCompute scaled_alpha = alpha;
if (doScaleA) {
scaled_alpha *= scale_A.host_view().at(origin);
}
if (doScaleB) {
scaled_alpha *= scale_B.host_view().at(origin);
}
ElementCompute scaled_beta = beta;
if (doScaleC) {
scaled_beta *= scale_C.host_view().at(origin);
}
//
// Verify
//
cutlass::reference::host::Conv2d<
typename Conv::ElementA, typename Conv::LayoutA,
typename Conv::ElementB, typename Conv::LayoutB,
typename Conv::ElementC, typename Conv::LayoutC,
ElementCompute, ElementAccumulator, ElementAccumulator
>(
kConvolutionalOperator,
problem_size,
tensor_A.host_ref(),
tensor_B.host_ref(),
tensor_C.host_ref(),
tmp_D.host_ref(),
scaled_alpha,
scaled_beta
);
ElementCompute tmp_abs_max_Aux(0.);
ElementCompute tmp_abs_max_D(0.);
cutlass::NumericConverter<ElementCompute, typename Conv::ElementC> cvt_c_to_compute;
cutlass::NumericConverter<ElementCompute, ElementAccumulator> cvt_accum_to_compute;
cutlass::NumericConverter<ElementAbsmax, ElementCompute> cvt_compute_to_absmax;
cutlass::NumericConverter<typename Conv::EpilogueOutputOp::ElementOutput, ElementCompute> cvt_compute_to_d;
cutlass::NumericConverter<typename Conv::EpilogueOutputOp::ElementAuxOutput, ElementCompute> cvt_compute_to_aux;
cutlass::absolute_value_op<ElementCompute> abs;
cutlass::maximum_with_nan_propogation<ElementCompute> max;
ActivationFunctor<ElementCompute> act;
ElementScalingFactor d_scale = kScaleOutput ? scale_D.host_view().at(origin) : ElementScalingFactor(1.);
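// Reference epilogue: for every output element, add the per-channel bias, apply the
// activation, track the absolute maxima of the pre-activation (Aux) and post-activation (D)
// values, and write out D (and optionally Aux) scaled by their respective scale factors.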
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
ElementCompute intermediate = cvt_accum_to_compute(tmp_D.host_view().at({n, p, q, k}));
ElementCompute bias = cvt_c_to_compute(tensor_Vector.host_view().at({0, 0, 0, k}));
ElementCompute aux = intermediate + bias;
ElementCompute d = act(aux);
tmp_abs_max_Aux = max(abs(aux), tmp_abs_max_Aux);
tmp_abs_max_D = max(abs(d), tmp_abs_max_D);
reference_D.host_view().at({n, p, q, k}) = cvt_compute_to_d(d * d_scale);
if (kScaleAux) {
reference_Aux.host_view().at({n, p, q, k}) = cvt_compute_to_aux(aux * scale_Aux.host_view().at(origin));
}
}
}
}
}
if (kScaleAux) {
reference_abs_max_Aux.host_view().at(origin) = cvt_compute_to_absmax(tmp_abs_max_Aux);
}
if (kScaleOutput) {
reference_abs_max_D.host_view().at(origin) = cvt_compute_to_absmax(tmp_abs_max_D);
}
return compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Conv::UnderlyingKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::conv::Conv2dProblemSize const &problem_size,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0))
{
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
this->initialize(problem_size);
//
// Initialize the GEMM operator
//
typename Conv::EpilogueOutputOp::Params::ActivationParams activation_params{alpha, beta};
typename Conv::EpilogueOutputOp::Params epilogue_params{
activation_params,
scale_A.device_data(),
scale_B.device_data(),
scale_C.device_data(),
scale_D.device_data(),
scale_Aux.device_data(),
abs_max_Aux.device_data(),
abs_max_D.device_data()
};
typename Conv::Arguments arguments{
problem_size,
tensor_A.device_ref(),
tensor_B.device_ref(),
tensor_C.device_ref(),
tensor_D.device_ref(),
tensor_Aux.device_ref(),
epilogue_params,
cutlass::conv::SplitKMode::kSerial,
tensor_Vector.device_data(),
0
};
Conv conv2d_op;
cutlass::Status status = conv2d_op.can_implement(arguments);
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
size_t workspace_size = Conv::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
status = conv2d_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the GEMM
//
status = conv2d_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
cudaError_t cuda_error = cudaDeviceSynchronize();
EXPECT_TRUE(cuda_error == cudaSuccess) << cudaGetErrorString(cuda_error);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
if (!passed) {
std::cout << "Failed" << std::endl;
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ImplicitGemm,
template<typename T> class ActivationFunctor = cutlass::epilogue::thread::Identity
>
bool TestAllConv2dWithAbsmax(bool scaleA=true, bool scaleB=true, bool scaleC=true) {
const Conv2dProblemVector &conv_test_sizes = Conv2dProblemVector();
const Conv2dProblemVector &conv_blacklist_sizes = Conv2dProblemVector();
//
// Testbed object
//
TestbedConv2dWithAbsMax<ImplicitGemm, ActivationFunctor> testbed(scaleA, scaleB, scaleC);
//
// Get conv problem sizes to run conv operator
//
TestbedConv2dProblemSizes conv_problems(128/cutlass::sizeof_bits<typename ImplicitGemm::ElementA>::value);
// Vector of conv2d problem sizes to avoid duplicate runs
Conv2dProblemVector conv_tested_sizes;
Conv2dProblemVector const *problem_vectors[] = {
&conv_test_sizes, // run user specified sizes
&conv_problems.conv2d_default_sizes, // run default and cudnn bug sizes
&conv_problems.conv2d_resnet50_sizes, // run resnet50 sizes
#if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
&conv_problems.conv2d_rigorous_sizes, // run large and rigorous sizes if enabled
#endif
};
bool passed = true;
// Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0)
for (Conv2dProblemVector const * problem_vector : problem_vectors) {
// Prune all problems with channels that aren't divisible by the number of elements accessed per
// load for operands A and B. This is meant to align with the requirements of iterators used for
// fprop kernels.
ChannelDivisibilitySpecification channel_spec(128 / cutlass::sizeof_bits<typename ImplicitGemm::ElementA>::value);
auto pruned_problem_vector = prune(*problem_vector, channel_spec);
// Run conv testbed on default convolution sizes
for(auto conv_problem : pruned_problem_vector) {
// Skip blacklist and avoid duplicate problem sizes
if (std::find(conv_blacklist_sizes.begin(), conv_blacklist_sizes.end(), conv_problem) != conv_blacklist_sizes.end() ||
std::find(conv_tested_sizes.begin(), conv_tested_sizes.end(), conv_problem) != conv_tested_sizes.end()) {
continue;
}
//
// Test
//
// push back tested problem size to avoid re-running duplicates
conv_tested_sizes.push_back(conv_problem);
// test mode = xcross
passed &= testbed.run(conv_problem);
if (!passed) {
return false;
}
// test mode = convolution
passed &= testbed.run(conv_problem.reset_mode(cutlass::conv::Mode::kConvolution));
if (!passed) {
return false;
}
}
}
return passed;
}
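// Example usage from a unit test (a sketch; the `Conv` type shown is hypothetical and would
// normally be a cutlass::conv::device implicit GEMM convolution whose epilogue supports
// absmax scaling):
//
//   TEST(Device_Conv2d_Fprop_WithAbsMax, example) {
//     using Conv = ...;  // device-level convolution with an absmax-capable epilogue
//     EXPECT_TRUE((test::conv::device::TestAllConv2dWithAbsmax<Conv>()));
//   }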
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace conv
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/conv/device/conv2d_with_absmax_testbed.h/0 | {
"file_path": "test/unit/conv/device/conv2d_with_absmax_testbed.h",
"repo_id": "test",
"token_count": 8980
} | 47 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <iomanip>
#include <utility>
#include <type_traits>
#include <vector>
#include <numeric>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
using namespace cute;
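// Baseline kernel: stage one double per thread through shared memory with ordinary
// loads/stores, then write back twice the staged value.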
__global__ void
test(double const* g_in, double* g_out)
{
extern __shared__ double smem[];
smem[threadIdx.x] = g_in[threadIdx.x];
__syncthreads();
g_out[threadIdx.x] = 2 * smem[threadIdx.x];
}
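// CuTe variant: the same staging, but the global->shared copy is expressed with cute::copy
// on gmem/smem tensors; the cp_async_fence()/cp_async_wait<0>() pair reflects the intent
// that this copy be issued asynchronously (cp.async) on SM80.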
__global__ void
test2(double const* g_in, double* g_out)
{
using namespace cute;
extern __shared__ double smem[];
auto s_tensor = make_tensor(make_smem_ptr(smem + threadIdx.x), Int<1>{});
auto g_tensor = make_tensor(make_gmem_ptr(g_in + threadIdx.x), Int<1>{});
copy(g_tensor, s_tensor);
cp_async_fence();
cp_async_wait<0>();
__syncthreads();
g_out[threadIdx.x] = 2 * smem[threadIdx.x];
}
TEST(SM80_CuTe_Ampere, CpAsync)
{
constexpr int count = 32;
thrust::host_vector<double> h_in(count);
for (int i = 0; i < count; ++i) {
h_in[i] = double(i);
}
thrust::device_vector<double> d_in(h_in);
thrust::device_vector<double> d_out(count, -1);
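// Run the baseline kernel and the CuTe copy kernel on the same input; both should
// produce 2 * x for every element.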
test<<<1, count, sizeof(double) * count>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<double> h_result = d_out;
thrust::device_vector<double> d_out_cp_async(count, -2);
test2<<<1, count, sizeof(double) * count>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out_cp_async.data()));
thrust::host_vector<double> h_result_cp_async = d_out_cp_async;
for (int i = 0; i < count; ++i) {
EXPECT_EQ(h_result[i], h_result_cp_async[i]);
}
}
| test/unit/cute/ampere/cp_async.cu/0 | {
"file_path": "test/unit/cute/ampere/cp_async.cu",
"repo_id": "test",
"token_count": 1191
} | 48 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cute/layout.hpp>
TEST(CuTe_core, WeaklyCongruent)
{
using namespace cute;
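// weakly_congruent(a, b) is a hierarchical profile check: it holds when b can be obtained
// from a by expanding leaves of a into nested sub-shapes (integer values are ignored).
// It is reflexive but not symmetric, as the EXPECT_FALSE cases below show.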
auto a = _1{};
auto b = _2{};
EXPECT_TRUE (weakly_congruent(a, a));
EXPECT_TRUE (weakly_congruent(b, b));
EXPECT_TRUE (weakly_congruent(a, b));
auto a0 = Shape<_1>{};
auto b0 = Shape<_2>{};
EXPECT_TRUE (weakly_congruent(a , a0));
EXPECT_TRUE (weakly_congruent(b , b0));
EXPECT_TRUE (weakly_congruent(a , b0));
EXPECT_TRUE (weakly_congruent(b , a0));
EXPECT_FALSE(weakly_congruent(a0, a ));
EXPECT_FALSE(weakly_congruent(b0, b ));
EXPECT_FALSE(weakly_congruent(a0, b ));
EXPECT_FALSE(weakly_congruent(b0, a ));
EXPECT_TRUE (weakly_congruent(a0, a0));
EXPECT_TRUE (weakly_congruent(b0, b0));
EXPECT_TRUE (weakly_congruent(a0, b0));
auto a1 = Shape<_1, _1>{};
EXPECT_TRUE (weakly_congruent(a , a1));
EXPECT_FALSE(weakly_congruent(a0, a1));
EXPECT_TRUE (weakly_congruent(a1, a1));
auto a2 = Shape<_1, Shape<_1,_1>>{};
EXPECT_TRUE (weakly_congruent(a , a2));
EXPECT_FALSE(weakly_congruent(a0, a2));
EXPECT_TRUE (weakly_congruent(a1, a2));
auto b1 = Shape<_2, _2>{};
EXPECT_TRUE (weakly_congruent(b , b1));
EXPECT_FALSE(weakly_congruent(b0, b1));
EXPECT_TRUE (weakly_congruent(a1, b1));
auto b2 = Shape<_2, Shape<_2,_2>>{};
EXPECT_FALSE(weakly_congruent(a2, b0));
EXPECT_FALSE(weakly_congruent(a2, a1));
EXPECT_TRUE (weakly_congruent(a2, b2));
auto b3 = Shape<Shape<_2,_2>, Shape<_2,_2>>{};
EXPECT_FALSE(weakly_congruent(a0, b3));
EXPECT_TRUE (weakly_congruent(a1, b3));
EXPECT_TRUE (weakly_congruent(a2, b3));
}
TEST(CuTe_core, WeaklyCompatible)
{
using namespace cute;
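// weakly_compatible(a, b) is a divisibility-style relation on shapes: roughly, b can be
// evenly tiled by a wherever a has a leaf (8 is weakly compatible with 16, while 16 is not
// weakly compatible with 12). Like the congruence check, it is reflexive but not symmetric.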
auto a = _16{};
auto b = _12{};
auto c = _8{};
EXPECT_TRUE (weakly_compatible(a, a));
EXPECT_TRUE (weakly_compatible(b, b));
EXPECT_TRUE (weakly_compatible(c, c));
EXPECT_FALSE(weakly_compatible(a, b));
EXPECT_FALSE(weakly_compatible(a, c));
EXPECT_TRUE (weakly_compatible(c, a));
auto a0 = Shape<_16>{};
EXPECT_TRUE (weakly_compatible(a0, a0));
EXPECT_TRUE (weakly_compatible(a , a0));
EXPECT_FALSE(weakly_compatible(a0, a ));
EXPECT_TRUE (weakly_compatible(c , a0));
EXPECT_FALSE(weakly_compatible(a0, c ));
EXPECT_FALSE(weakly_compatible(b , a0));
EXPECT_FALSE(weakly_compatible(a0, b ));
auto a1 = Shape<_2,_8>{};
EXPECT_TRUE (weakly_compatible(a1, a1));
EXPECT_TRUE (weakly_compatible(a , a1));
EXPECT_FALSE(weakly_compatible(a0, a1));
EXPECT_FALSE(weakly_compatible(a1, a0));
EXPECT_TRUE (weakly_compatible(a1, Shape<_2,Shape<_2,_4>>{}));
auto a2 = Shape<Shape<_2,_8>>{};
EXPECT_TRUE (weakly_compatible(a2, a2));
EXPECT_TRUE (weakly_compatible(a , a2));
EXPECT_TRUE (weakly_compatible(c , a2));
EXPECT_TRUE (weakly_compatible(a0, a2));
EXPECT_FALSE(weakly_compatible(a2, a0));
auto a3 = Shape<Shape<_2,Shape<_4,_2>>>{};
EXPECT_TRUE (weakly_compatible(a3, a3));
EXPECT_TRUE (weakly_compatible(a , a3));
EXPECT_TRUE (weakly_compatible(c , a3));
EXPECT_TRUE (weakly_compatible(a0, a3));
EXPECT_FALSE(weakly_compatible(a3, a0));
EXPECT_TRUE (weakly_compatible(a2, a3));
EXPECT_FALSE(weakly_compatible(a3, a2));
}
TEST(CuTe_core, SoftlyCompatible)
{
using namespace cute;
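// softly_compatible is a related divisibility predicate; note that its behavior on plain
// integers differs from weakly_compatible above (16 vs. 8 passes here in the opposite
// argument order), which the cases below pin down.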
auto a = _16{};
auto b = _12{};
auto c = _8{};
EXPECT_TRUE (softly_compatible(a, a));
EXPECT_TRUE (softly_compatible(b, b));
EXPECT_TRUE (softly_compatible(c, c));
EXPECT_FALSE(softly_compatible(a, b));
EXPECT_TRUE (softly_compatible(a, c));
EXPECT_FALSE(softly_compatible(c, a));
auto a0 = Shape<_16>{};
EXPECT_TRUE (softly_compatible(a0, a0));
EXPECT_TRUE (softly_compatible(a , a0));
EXPECT_FALSE(softly_compatible(a0, a ));
EXPECT_FALSE(softly_compatible(c , a0));
EXPECT_FALSE(softly_compatible(a0, c ));
EXPECT_FALSE(softly_compatible(b , a0));
EXPECT_FALSE(softly_compatible(a0, b ));
auto a1 = Shape<_2,_8>{};
EXPECT_TRUE (softly_compatible(a1, a1));
EXPECT_TRUE (softly_compatible(a , a1));
EXPECT_FALSE(softly_compatible(a0, a1));
EXPECT_FALSE(softly_compatible(a1, a0));
EXPECT_TRUE (softly_compatible(a1, Shape<_2,Shape<_2,_4>>{}));
auto a2 = Shape<Shape<_2,_8>>{};
EXPECT_TRUE (softly_compatible(a2, a2));
EXPECT_TRUE (softly_compatible(a , a2));
EXPECT_FALSE(softly_compatible(c , a2));
EXPECT_TRUE (softly_compatible(a0, a2));
EXPECT_FALSE(softly_compatible(a2, a0));
auto a3 = Shape<Shape<_2,Shape<_4,_2>>>{};
EXPECT_TRUE (softly_compatible(a3, a3));
EXPECT_TRUE (softly_compatible(a , a3));
EXPECT_FALSE(softly_compatible(c , a3));
EXPECT_TRUE (softly_compatible(a0, a3));
EXPECT_FALSE(softly_compatible(a3, a0));
EXPECT_TRUE (softly_compatible(a2, a3));
EXPECT_FALSE(softly_compatible(a3, a2));
}
| test/unit/cute/core/int_tuple.cpp/0 | {
"file_path": "test/unit/cute/core/int_tuple.cpp",
"repo_id": "test",
"token_count": 2609
} | 49 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <cute/tensor.hpp>
#include "../cooperative_gemm_common.hpp"
using namespace cute;
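// These tests exercise the cooperative GEMM helpers from ../cooperative_gemm_common.hpp
// with UniversalFMA and SM70 MMA atoms, exact and non-tile-multiple (predicated) problem
// shapes, custom MNK permutations, swizzled shared-memory layouts, and element-wise
// load/store transforms.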
TEST(SM70_CuTe_Volta, CooperativeGemm1_FloatFMA) {
using value_type = float;
constexpr uint32_t m = 64;
constexpr uint32_t n = 32;
constexpr uint32_t k = 16;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>,
Layout<Shape<_16, _8, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm1_FloatFMA_Predication) {
using value_type = float;
constexpr uint32_t m = 88;
constexpr uint32_t n = 20;
constexpr uint32_t k = 12;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>,
Layout<Shape<_2, _64, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm1_FloatFMA_Predication2) {
using value_type = float;
constexpr uint32_t m = 88;
constexpr uint32_t n = 36;
constexpr uint32_t k = 24;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>,
Layout<Shape<_4, _32, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm1_FloatFMA_Predication3) {
using value_type = float;
constexpr uint32_t m = 67;
constexpr uint32_t n = 13;
constexpr uint32_t k = 11;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>,
Layout<Shape<_1, _128, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm2_DoubleFMA) {
using value_type = double;
constexpr uint32_t m = 16;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t =
TiledMMA<
MMA_Atom<UniversalFMA<value_type, value_type, value_type, value_type>>,
Layout<Shape<_16, _8, _1>>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm3_Float_FMA_CustomPermutationMNK) {
using value_type = float;
constexpr uint32_t m = 32;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 256;
using tiled_mma_t = TiledMMA<
MMA_Atom<
UniversalFMA<value_type, value_type, value_type, value_type>
>,
Layout<
Shape<_16, _16, _1>
>,
Tile<
Layout<
Shape<_16,_2>, Stride<_2,_1>
>, // 32x32x1 MMA with perm for load vectorization
Layout<
Shape<_16,_2>, Stride<_2,_1>
>,
Underscore
>
>;
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm4_Half_MMA) {
using value_type = cutlass::half_t;
constexpr uint32_t m = 32;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>,
Layout<Shape<_4, _4, _1>>
>;
using smem_a_atom_layout_t = typename tiled_mma_t::AtomLayoutB_TV;
using smem_b_atom_layout_t = typename tiled_mma_t::AtomLayoutA_TV;
using smem_c_atom_layout_t = decltype(make_layout(make_shape(Int<m> {}, Int<n> {})));
test_cooperative_gemm_col_major_layout<smem_a_atom_layout_t,
smem_b_atom_layout_t,
smem_c_atom_layout_t,
m,
n,
k,
thread_block_size,
tiled_mma_t,
value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm5_Half_MMA) {
using value_type = cutlass::half_t;
constexpr uint32_t m = 32;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>,
Layout<Shape<_4, _4, _1>>
>;
using gmem_a_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<k>{})));
using gmem_b_layout_t = decltype(make_layout(make_shape(Int<n>{}, Int<k>{}), GenColMajor{}));
using gmem_c_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{})));
using smem_a_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<k>{})));
using smem_b_layout_t = decltype(make_layout(make_shape(Int<n>{}, Int<k>{}), GenColMajor{}));
using smem_c_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{})));
test_cooperative_gemm<gmem_a_layout_t,
gmem_b_layout_t,
gmem_c_layout_t,
smem_a_layout_t,
smem_b_layout_t,
smem_c_layout_t,
AutoVectorizingCopyWithAssumedAlignment<128>, // A
AutoVectorizingCopyWithAssumedAlignment<128>, // B
AutoVectorizingCopyWithAssumedAlignment<128>, // C
thread_block_size,
tiled_mma_t,
128,
value_type,
value_type,
value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm5_Half_MMA_Predicated) {
using value_type = cutlass::half_t;
constexpr uint32_t m = 31;
constexpr uint32_t n = 27;
constexpr uint32_t k = 17;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>,
Layout<Shape<_4, _4, _1>>
>;
using gmem_a_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<k>{})));
using gmem_b_layout_t = decltype(make_layout(make_shape(Int<n>{}, Int<k>{}), GenColMajor{}));
using gmem_c_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{})));
using smem_a_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<k>{})));
using smem_b_layout_t = decltype(make_layout(make_shape(Int<n>{}, Int<k>{}), GenColMajor{}));
using smem_c_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{})));
test_cooperative_gemm<gmem_a_layout_t,
gmem_b_layout_t,
gmem_c_layout_t,
smem_a_layout_t,
smem_b_layout_t,
smem_c_layout_t,
AutoVectorizingCopyWithAssumedAlignment<16>, // A
AutoVectorizingCopyWithAssumedAlignment<16>, // B
AutoVectorizingCopyWithAssumedAlignment<16>, // C
thread_block_size,
tiled_mma_t,
16,
value_type,
value_type,
value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm6_Half_MAA_SwizzledSmemLayouts) {
using value_type = cutlass::half_t;
constexpr uint32_t m = 128;
constexpr uint32_t n = 128;
constexpr uint32_t k = 64;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>,
Layout<Shape<_4, _4, _1>>
>;
using smem_a_atom_layout_t = decltype(
composition(Swizzle<3,3,3>{},
Layout<Shape < _8,_64>,
Stride<_64, _1>>{}));
using smem_b_atom_layout_t = decltype(
composition(Swizzle<3,3,3>{},
Layout<Shape <_64, _8>,
Stride< _1,_64>>{}));
using smem_c_atom_layout_t = decltype(make_layout(make_shape(Int<m>{}, Int<n>{}), GenRowMajor{}));
using gmem_a_layout_t = decltype(make_layout(make_shape(Int<m> {}, Int<k> {}), GenRowMajor{}));
using gmem_b_layout_t = decltype(make_layout(make_shape(Int<n> {}, Int<k> {}), GenColMajor{}));
using gmem_c_layout_t = decltype(make_layout(make_shape(Int<m> {}, Int<n> {}), GenRowMajor{}));
using smem_a_layout_t = decltype(tile_to_shape(
smem_a_atom_layout_t{},
make_shape(shape<0>(gmem_a_layout_t{}), shape<1>(gmem_a_layout_t{})))
);
// Transposed
using smem_b_layout_t = decltype(tile_to_shape(
smem_b_atom_layout_t{},
make_shape(shape<0>(gmem_b_layout_t{}), shape<1>(gmem_b_layout_t{})))
);
using smem_c_layout_t = decltype(tile_to_shape(
smem_c_atom_layout_t{},
make_shape(shape<0>(gmem_c_layout_t{}), shape<1>(gmem_c_layout_t{})))
);
test_cooperative_gemm<gmem_a_layout_t,
gmem_b_layout_t,
gmem_c_layout_t,
smem_a_layout_t,
smem_b_layout_t,
smem_c_layout_t,
AutoVectorizingCopyWithAssumedAlignment<128>, // A
AutoVectorizingCopyWithAssumedAlignment<128>, // B
AutoVectorizingCopyWithAssumedAlignment<128>, // C
thread_block_size,
tiled_mma_t,
128,
value_type,
value_type,
value_type>();
}
TEST(SM70_CuTe_Volta, CooperativeGemm7_TransformNegate_FMA) {
using TA = float;
using TB = float;
using TC = double;
constexpr uint32_t m = 32;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<UniversalFMA<TC, TA, TB, TC>>,
Layout<Shape<_16, _8, _1>>
>;
auto aload = cute::negate {};
auto bload = cute::negate {};
auto cload = cute::negate {};
auto cstore = cute::negate {};
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, 64, TA, TB, TC>(
aload, bload, cload, cstore);
}
TEST(SM70_CuTe_Volta, CooperativeGemm7_TransformNegate_MMA) {
using value_type = cutlass::half_t;
constexpr uint32_t m = 32;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<SM70_8x8x4_F16F16F16F16_TN>,
Layout<Shape<_4, _4, _1>>
>;
auto aload = cute::negate {};
auto bload = cute::negate {};
auto cload = cute::negate {};
auto cstore = cute::negate {};
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, value_type>(
aload, bload, cload, cstore);
}
template<class ConstantType>
struct increment_by_x {
ConstantType x;
template <class T>
CUTE_HOST_DEVICE constexpr
T operator()(const T& arg) const {
return arg + x;
}
};
template<class From, class To>
struct convert_to {
CUTE_HOST_DEVICE constexpr
To operator()(const From& arg) const {
return static_cast<To>(arg);
}
};
TEST(SM70_CuTe_Volta, CooperativeGemm7_TransformCustomOp_FMA) {
using TA = float;
using TB = float;
using TC = double;
constexpr uint32_t m = 32;
constexpr uint32_t n = 32;
constexpr uint32_t k = 32;
constexpr uint32_t thread_block_size = 128;
using tiled_mma_t = TiledMMA<
MMA_Atom<UniversalFMA<TC, TA, TB, TC>>,
Layout<Shape<_16, _8, _1>>
>;
auto aload = increment_by_x<float>{1.111f};
auto bload = convert_to<float, double> {};
auto cload = cute::negate {};
auto cstore = cute::negate {};
test_cooperative_gemm_col_major_layout<m, n, k, thread_block_size, tiled_mma_t, 64, TA, TB, TC>(
aload, bload, cload, cstore);
}
| test/unit/cute/volta/cooperative_gemm.cu/0 | {
"file_path": "test/unit/cute/volta/cooperative_gemm.cu",
"repo_id": "test",
"token_count": 6602
} | 50 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_gelu.h"
#include "cutlass/epilogue/thread/activation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
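// LinearCombination computes D = alpha * accum + beta * source elementwise over an Array
// fragment. The tests below cover alpha/beta passed by value and by pointer, plus the
// GELU-fused variants.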
TEST(Epilogue_thread_linear_combination, device_side_f16_f32_value) {
using Element = float;
using ElementOutput = cutlass::half_t;
int const kCount = 8;
using LinearCombination = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kCount,
Element,
Element>;
Element alpha = Element(2);
Element beta = Element(1);
typename LinearCombination::Params params(alpha, beta);
LinearCombination linear_combination_op(params);
cutlass::Array<ElementOutput, kCount> source;
cutlass::Array<Element, kCount> accum;
for (int i = 0; i < kCount; ++i) {
accum[i] = Element(i * 2);
source[i] = ElementOutput((i * 7 % 9) - 4);
}
cutlass::Array<ElementOutput, kCount> destination = linear_combination_op(accum, source);
for (int i = 0; i < kCount; ++i) {
ElementOutput expected = ElementOutput(
alpha * accum[i] +
beta * Element(ElementOutput(source[i]))
);
ElementOutput got = destination[i];
EXPECT_TRUE(expected == got);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_linear_combination, device_side_f16_f32_ptr) {
using Element = float;
using ElementOutput = cutlass::half_t;
int const kCount = 8;
using LinearCombination = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kCount,
Element,
Element>;
Element alpha = Element(2);
Element beta = Element(1);
typename LinearCombination::Params params(&alpha, &beta);
LinearCombination linear_combination_op(params);
cutlass::Array<ElementOutput, kCount> source;
cutlass::Array<Element, kCount> accum;
for (int i = 0; i < kCount; ++i) {
accum[i] = Element(i * 2);
source[i] = ElementOutput((i * 7 % 9) - 4);
}
cutlass::Array<ElementOutput, kCount> destination = linear_combination_op(accum, source);
for (int i = 0; i < kCount; ++i) {
ElementOutput expected = ElementOutput(
alpha * accum[i] +
beta * Element(ElementOutput(source[i]))
);
ElementOutput got = destination[i];
EXPECT_TRUE(expected == got);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_linear_combination_gelu, device_side_f16_f16_ptr) {
using Element = cutlass::half_t;
using ElementOutput = cutlass::half_t;
int const kCount = 8;
using LinearCombinationGELU = cutlass::epilogue::thread::LinearCombinationGELU<
ElementOutput,
kCount,
Element,
Element>;
Element alpha = Element(1);
Element beta = Element(0);
typename LinearCombinationGELU::Params params(&alpha, &beta);
LinearCombinationGELU linear_combination_op(params);
cutlass::Array<Element, kCount> accum;
for (int i = 0; i < kCount; ++i) {
accum[i] = Element((float)i * 0.3f);
}
cutlass::Array<ElementOutput, kCount> destination = linear_combination_op(accum, accum);
cutlass::epilogue::thread::GELU<ElementOutput> gelu_func;
for (int i = 0; i < kCount; ++i) {
ElementOutput expected = gelu_func(accum[i]);
ElementOutput got = destination[i];
EXPECT_TRUE(expected == got);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Epilogue_thread_linear_combination_gelu_taylor, device_side_f16_f16_ptr) {
using Element = cutlass::half_t;
using ElementOutput = cutlass::half_t;
int const kCount = 8;
using LinearCombinationGELU = cutlass::epilogue::thread::LinearCombinationGELU<
ElementOutput,
kCount,
Element,
Element>;
Element alpha = Element(1);
Element beta = Element(0);
typename LinearCombinationGELU::Params params(&alpha, &beta);
LinearCombinationGELU linear_combination_op(params);
cutlass::Array<Element, kCount> accum;
for (int i = 0; i < kCount; ++i) {
accum[i] = Element((float)i * 0.3f);
}
cutlass::Array<ElementOutput, kCount> destination = linear_combination_op(accum, accum);
cutlass::epilogue::thread::GELU<ElementOutput> gelu_func;
for (int i = 0; i < kCount; ++i) {
ElementOutput expected = gelu_func(accum[i]);
ElementOutput got = destination[i];
EXPECT_TRUE(expected == got);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/epilogue/thread/linear_combination.cu/0 | {
"file_path": "test/unit/epilogue/thread/linear_combination.cu",
"repo_id": "test",
"token_count": 2037
} | 51 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Testbed for device-wide SYMM/HEMM (symmetric/Hermitian matrix-matrix multiply) interface tests
*/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/blas3.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/reference/host/symm.h"
#include "cutlass/util/reference/host/symm_complex.h"
#include "testbed_utils.h"
namespace test {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Symm>
struct TestbedSymmUniversal {
using ElementA = typename Symm::ElementA;
using ElementB = typename Symm::ElementB;
using ElementC = typename Symm::ElementC;
using ElementAccumulator = typename Symm::ElementAccumulator;
using ElementCompute = typename Symm::SymmKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
uint64_t seed;
cutlass::HostTensor<typename Symm::ElementA, typename Symm::LayoutA> tensor_A;
cutlass::HostTensor<typename Symm::ElementB, typename Symm::LayoutB> tensor_B;
cutlass::HostTensor<typename Symm::ElementC, typename Symm::LayoutC> tensor_C;
cutlass::HostTensor<typename Symm::ElementC, typename Symm::LayoutC> tensor_D;
cutlass::HostTensor<typename Symm::ElementC, typename Symm::LayoutC> reference_D;
//
// Methods
//
TestbedSymmUniversal(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed,
int mantissa_in_bits) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Symm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillRandomUniform(
view, seed, scope_max, scope_min, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else {
EXPECT_TRUE(false) << "Input distribution not implemented";
return false;
}
return true;
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_symmetric_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed,
int mantissa_in_bits) {
if (dist_kind == cutlass::Distribution::Uniform) {
double scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<typename Symm::ElementC>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 5;
scope_min = -5;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::host::TensorFillSymmetricRandomUniform(
view, seed, Symm::kFillModeA, scope_max, scope_min, mantissa_in_bits);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillSymmetricRandomGaussian(
view, seed, Symm::kFillModeA, 0, 0.5, mantissa_in_bits);
}
else {
EXPECT_TRUE(false) << "Input distribution (symmetric tensor) not implemented";
return false;
}
return true;
}
/// Initializes data structures
void initialize(cutlass::gemm::GemmCoord problem_size) {
//
// Allocate the Symm workspace
//
if (Symm::kSideModeA == cutlass::SideMode::kLeft) {
tensor_A.resize(cutlass::make_Coord(problem_size.m(),problem_size.m()));
}
else if (Symm::kSideModeA == cutlass::SideMode::kRight) {
tensor_A.resize(cutlass::make_Coord(problem_size.n(),problem_size.n()));
}
tensor_B.resize(problem_size.mn());
tensor_C.resize(problem_size.mn());
tensor_D.resize(problem_size.mn());
reference_D.resize(problem_size.mn(), false);
EXPECT_TRUE(initialize_symmetric_tensor(tensor_A.host_view(), init_A, seed + 2019, cutlass::MantissaInBits<typename Symm::ElementA>::bits));
EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018, cutlass::MantissaInBits<typename Symm::ElementB>::bits));
EXPECT_TRUE(initialize_tensor(tensor_C.host_view(), init_C, seed + 2017, cutlass::MantissaInBits<typename Symm::ElementC>::bits));
// It is possible to randomly initialize to all zeros, so override this with non-zeros
// in the upper left corner of each operand.
tensor_A.host_view().at({0, 0}) = typename Symm::ElementA(1);
tensor_B.host_view().at({0, 0}) = typename Symm::ElementB(1);
tensor_C.host_view().at({0, 0}) = typename Symm::ElementC(1);
cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view());
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.sync_device();
tensor_D.sync_device();
}
/// Compares computed reference with device reference and outputs to a file if incorrect
bool compare_reference(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
tensor_D.sync_host();
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0);
if (tensor_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0);
if (reference_D.size() > 1)
EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0);
double l2_norm = cutlass::reference::host::TensorRelativeErrorMetric(reference_D.host_view(), tensor_D.host_view());
bool passed = l2_norm < cutlass::MantissaInBits<typename Symm::ElementA>::error;
return passed;
}
/// Verifies the result is a Symm
bool verify(
cutlass::gemm::GemmCoord problem_size,
ElementCompute alpha,
ElementCompute beta) {
//
// Verify
//
using HostReference = typename cutlass::platform::conditional<
(cutlass::platform::is_same<typename Symm::ElementC,
cutlass::complex<double>
>::value ||
cutlass::platform::is_same<typename Symm::ElementC,
cutlass::complex<float>
>::value
),
cutlass::reference::host::SymmComplex<
typename Symm::ElementA, typename Symm::LayoutA,
Symm::kSideModeA, Symm::kFillModeA,
typename Symm::ElementB, typename Symm::LayoutB,
typename Symm::ElementC, typename Symm::LayoutC,
ElementCompute,
ElementAccumulator,
Symm::kBlasMode>,
cutlass::reference::host::Symm<
typename Symm::ElementA, typename Symm::LayoutA,
Symm::kSideModeA, Symm::kFillModeA,
typename Symm::ElementB, typename Symm::LayoutB,
typename Symm::ElementC, typename Symm::LayoutC,
ElementCompute,
ElementAccumulator>
>::type;
HostReference reference_symm;
reference_symm(
problem_size,
alpha,
tensor_A.host_ref(),
tensor_B.host_ref(),
beta,
tensor_C.host_ref(),
reference_D.host_ref(),
ElementAccumulator(0)
);
return compare_reference(problem_size, alpha, beta);
}
/// Returns true if the CUDA device is sufficient to execute the kernel.
bool sufficient() const {
//
// Determine SMEM requirements and waive if not satisfied
//
size_t smem_size = sizeof(typename Symm::SymmKernel::SharedStorage);
cudaDeviceProp properties;
int device_idx;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() API call failed.");
}
result = cudaGetDeviceProperties(&properties, device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
if (properties.sharedMemPerBlockOptin < smem_size) {
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmUniversalMode mode,
cutlass::gemm::GemmCoord problem_size,
int batch_count = 1,
ElementCompute alpha = ElementCompute(1),
ElementCompute beta = ElementCompute(0)) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
#if 0
std::cout << "[TestbedSymmUniversal::run()] problem(m, n, k): " << problem_size
<< " alpha: " << ElementCompute(alpha)
<< " beta: " << ElementCompute(beta) << std::endl;
#endif
this->initialize(problem_size);
//
// Initialize the Symm operator
//
    int batch_stride_A = 0;
    if (Symm::kSideModeA == cutlass::SideMode::kLeft)
      batch_stride_A = problem_size.m()*problem_size.m();
    else if (Symm::kSideModeA == cutlass::SideMode::kRight)
      batch_stride_A = problem_size.n()*problem_size.n();
typename Symm::Arguments arguments{
mode,
problem_size,
batch_count,
{alpha, beta},
tensor_A.device_data(),
tensor_B.device_data(),
tensor_C.device_data(),
tensor_D.device_data(),
batch_stride_A,
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
problem_size.m() * problem_size.n(),
tensor_A.layout().stride(0),
tensor_B.layout().stride(0),
tensor_C.layout().stride(0),
tensor_D.layout().stride(0)
};
Symm symm_op;
size_t workspace_size = Symm::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
cutlass::Status status = symm_op.initialize(arguments, workspace.get());
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Run the Symm
//
status = symm_op();
EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status);
//
// Verify
//
bool passed = this->verify(problem_size, alpha, beta);
//if (true) {
if (!passed) {
std::stringstream fname;
fname << "error_"
<< (Symm::kBlasMode == cutlass::BlasMode::kSymmetric ? "symm_" : "hemm_" )
<< "device_"
<< "fill_mode_a_"
<< (Symm::kSideModeA == cutlass::SideMode::kLeft ? "leftside_" :
(Symm::kSideModeA == cutlass::SideMode::kRight ? "rightside_" : "invalid_"))
<< (Symm::kFillModeA == cutlass::FillMode::kLower ? "lower_" :
(Symm::kFillModeA == cutlass::FillMode::kUpper ? "upper_" : "invalid_"))
<< "mnk_"
<< problem_size.m() << "x"
<< problem_size.n() << "x"
<< problem_size.k() << "_"
<< Symm::ThreadblockShape::kM << "x"
<< Symm::ThreadblockShape::kN << "x"
<< Symm::ThreadblockShape::kK << "_"
<< Symm::WarpShape::kM << "x"
<< Symm::WarpShape::kN << "x"
<< Symm::WarpShape::kK << ".txt";
std::cout << fname.str() << std::endl;
std::ofstream results(fname.str());
results << problem_size << std::endl;
results
<< "alpha: " << ElementCompute(alpha) << "\n"
<< "beta: " << ElementCompute(beta) << "\n"
<< "\nA:\n" << tensor_A.host_view() << "\n"
<< "\nB:\n" << tensor_B.host_view() << "\n"
<< "\nC:\n" << tensor_C.host_view() << "\n"
<< "\nD reference:\n" << reference_D.host_view() << "\n"
<< "\nD computed:\n" << tensor_D.host_view() << "\n";
}
return passed;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Symm>
bool TestsymmUniversal(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmUniversalMode mode,
int batch_count,
double alpha = 1.0,
double beta = 2.0) {
bool passed = true;
TestbedSymmUniversal<Symm> testbed;
using ElementCompute = typename Symm::EpilogueOutputOp::ElementCompute;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
return passed;
}
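/////////////////////////////////////////////////////////////////////////////////////////////////
// Hedged usage sketch (hypothetical helper, not used by the existing unit tests): pushes a single
// SYMM/HEMM problem through TestsymmUniversal() for an already-defined device-level Symm type.
// The K extent is tied to the side mode, mirroring the logic in TestAllSymmUniversal() below.
template <typename Symm>
bool ExampleRunSingleSymm(int m, int n, double alpha = 1.0, double beta = 2.0) {
  int k = (Symm::kSideModeA == cutlass::SideMode::kLeft) ? m : n;
  return TestsymmUniversal<Symm>(
    cutlass::gemm::GemmCoord(m, n, k),
    cutlass::gemm::GemmUniversalMode::kGemm,
    /*batch_count=*/1,
    alpha,
    beta);
}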
template <typename Symm>
bool TestAllSymmUniversal() {
bool passed = true;
int const kMinimumOperandElementSize = int(cutlass::sizeof_bits<typename Symm::ElementA>::value);
int const kAlignment = cutlass::platform::is_same<
typename Symm::OperatorClass,
cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize;
// int8_t gemm alignment constraints
int const kAlignmentM = cutlass::platform::is_same<typename Symm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Symm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Symm::LayoutA, cutlass::layout::ColumnMajor>::value ? 4 : kAlignment;
int const kAlignmentN = kAlignmentM;
int const kAlignmentK = cutlass::platform::is_same<typename Symm::OperatorClass, cutlass::arch::OpClassSimt>::value &&
cutlass::platform::is_same<typename Symm::ElementA, int8_t>::value &&
cutlass::platform::is_same<typename Symm::LayoutA, cutlass::layout::RowMajor>::value
? 4 : kAlignment;
cutlass::gemm::GemmUniversalMode modes[] = {
cutlass::gemm::GemmUniversalMode::kGemm,
};
int problem_size_m[] = {
kAlignmentK,
Symm::ThreadblockShape::kK * Symm::kStages - kAlignmentK,
Symm::ThreadblockShape::kK * Symm::kStages * 3 - kAlignmentK
};
int problem_size_n[] = {
kAlignmentN, 512 - 2*kAlignmentN
};
  int batch_counts[] = { // may be interpreted as batch count or split-K slices
1 // Just running one batch for now (removing 2, 3, 5, 7)
};
double problem_alpha[] = {
1.0, 3.0
};
double problem_beta[] = {
0, 2.0
};
using ElementCompute = typename Symm::EpilogueOutputOp::ElementCompute;
for (cutlass::gemm::GemmUniversalMode mode : modes) {
for (int m : problem_size_m) {
for (int n : problem_size_n) {
for (int batch_count : batch_counts) {
for (auto alpha : problem_alpha) {
for (auto beta : problem_beta) {
int k = 0;
if (Symm::kSideModeA == cutlass::SideMode::kLeft)
k = m;
else if (Symm::kSideModeA == cutlass::SideMode::kRight)
k = n;
if (mode == cutlass::gemm::GemmUniversalMode::kGemm ||
mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) {
#if 0
// skip very small K problems
if (k / batch_count < 2 * Symm::ThreadblockShape::kK) {
continue;
}
#endif
}
cutlass::gemm::GemmCoord problem_size(m, n, k);
TestbedSymmUniversal<Symm> testbed;
passed = testbed.run(
mode,
problem_size,
batch_count,
cutlass::from_real<ElementCompute>(alpha),
cutlass::from_real<ElementCompute>(beta)
);
if (!passed) {
return false;
}
}
}
}
}
}
}
return passed;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
| test/unit/gemm/device/testbed_symm_universal.h/0 | {
"file_path": "test/unit/gemm/device/testbed_symm_universal.h",
"repo_id": "test",
"token_count": 8569
} | 52 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#pragma once
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/layout/vector.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"
namespace test {
namespace gemm {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Thread-level matrix multiply-accumulate
template <typename Mma>
void kernel(
typename Mma::ElementC *D,
typename Mma::ElementA const *A,
typename Mma::ElementB const *B,
typename Mma::ElementC const *C) {
auto ptr_D = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> *>(D);
auto ptr_A = reinterpret_cast<cutlass::Array<typename Mma::ElementA, Mma::Shape::kMK> const *>(A);
auto ptr_B = reinterpret_cast<cutlass::Array<typename Mma::ElementB, Mma::Shape::kKN> const *>(B);
auto ptr_C = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> const *>(C);
Mma mma;
auto a = *ptr_A;
auto b = *ptr_B;
auto c = *ptr_C;
using Btype = typename Mma::ElementB;
cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> d;
mma(d, a, b, c);
*ptr_D = d;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC
>
struct Testbed {
/// Thread-level matrix multiply-accumulate operator
using Mma = cutlass::gemm::thread::Mma<
Shape,
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC
>;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> tensor_A;
cutlass::HostTensor<ElementB, LayoutB> tensor_B;
cutlass::HostTensor<ElementC, LayoutC> tensor_C;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed;
cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference;
//
// Methods
//
/// Allocates workspace in device memory
Testbed() {
tensor_A.reset(cutlass::make_Coord(Shape::kM, Shape::kK), false);
tensor_B.reset(cutlass::make_Coord(Shape::kK, Shape::kN), false);
tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false);
}
/// Runs the test
bool run() {
//
// initialize device memory
//
cutlass::reference::host::detail::RandomUniformFunc< ElementA > tfill_rand_func(
0, // seed
10, // max
0, // min
0); // bits after decimal
cutlass::reference::host::detail::TensorFillRandomUniformFunc< ElementA, LayoutA > tfill_rand(
tensor_A.host_view(),
tfill_rand_func);
for (auto i=0; i< Shape::kM; i++)
for (auto j=0; j< Shape::kK; j++)
tfill_rand(cutlass::make_Coord(i,j));
cutlass::reference::host::BlockFillSequential(
tensor_B.host_data(),
tensor_B.capacity(),
ElementB(1),
ElementB(2)
);
cutlass::reference::host::TensorFill(
tensor_C.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_computed.host_view(),
ElementC(0)
);
cutlass::reference::host::TensorFill(
tensor_D_reference.host_view(),
ElementC(0)
);
// Host side call
kernel<Mma>(
tensor_D_computed.host_data(),
tensor_A.host_data(),
tensor_B.host_data(),
tensor_C.host_data());
//
// Reference implementation
//
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC>
reference_gemm;
reference_gemm(
{Shape::kM, Shape::kN, Shape::kK},
ElementC(1),
tensor_A.host_ref(),
tensor_B.host_ref(),
ElementC(0),
tensor_D_reference.host_ref()
);
//
// Verify equivalence
//
// compare
bool passed = cutlass::reference::host::TensorEquals(
tensor_D_computed.host_view(),
tensor_D_reference.host_view()
);
EXPECT_TRUE(passed)
<< "A:\n" << tensor_A.host_view() << "\n\n"
<< "B:\n" << tensor_B.host_view() << "\n\n"
<< "C:\n" << tensor_C.host_view() << "\n\n"
<< "Reference:\n" << tensor_D_reference.host_view() << "\n\n"
<< "Computed:\n" << tensor_D_computed.host_view() << std::endl;
return passed;
}
};
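/////////////////////////////////////////////////////////////////////////////////////////////////
// Hedged usage sketch (illustrative only; the 4x4x4 shape and column-major layouts are assumptions
// chosen for brevity -- the real unit tests sweep many shapes and layouts): instantiates the
// host-side testbed for a small SIMT thread-level GEMM and runs it entirely on the host.
inline bool example_run_thread_level_gemm() {
  return Testbed<
      cutlass::gemm::GemmShape<4, 4, 4>,
      float, cutlass::layout::ColumnMajor,
      float, cutlass::layout::ColumnMajor,
      float, cutlass::layout::ColumnMajor>().run();
}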
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace gemm
} // namespace test
| test/unit/gemm/thread/host/testbed_host.h/0 | {
"file_path": "test/unit/gemm/thread/host/testbed_host.h",
"repo_id": "test",
"token_count": 2631
} | 53 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Unit testbed for threadblock-scoped (pipelined) GEMM
*/
#pragma once
#include <fstream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/aligned_buffer.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/vector.h"
#include "cutlass/numeric_types.h"
#include "cutlass/core_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h"
#include "cutlass/cutlass.h"
#include "cutlass/platform/platform.h"
namespace test {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Mma>
__global__ void kernel_mma(cutlass::gemm::GemmCoord problem_size,
typename Mma::IteratorA::Params params_A,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::Params params_B,
typename Mma::IteratorB::TensorRef ref_B,
typename Mma::ElementC *ptr_C,
typename Mma::LayoutC::Stride::Index ldc) {
// Shared storage needed by threadblock-scoped matrix multiply-accumulate
__shared__ typename Mma::SharedStorage shared_storage;
// Compute threadblock location
cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y),
0};
cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM,
tb_tile_offset.k()};
cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(),
tb_tile_offset.n() * Mma::Shape::kN};
// Compute position within threadblock
int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(params_A, ref_A.data(),
{problem_size.m(), problem_size.k()},
tb_thread_id, tb_offset_A);
typename Mma::IteratorB iterator_B(params_B, ref_B.data(),
{problem_size.k(), problem_size.n()},
tb_thread_id, tb_offset_B);
  int warp_id = threadIdx.y;
  int lane_id = threadIdx.x;
  // Construct thread-scoped matrix multiply
  Mma mma(shared_storage, tb_thread_id, warp_id, lane_id);
typename Mma::FragmentC accum;
accum.clear();
int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
// Output results
typename Mma::Operator::IteratorC iterator_C({ptr_C, ldc}, lane_id);
iterator_C.add_tile_offset(
{(tb_tile_offset.m() * Mma::WarpCount::kM) +
(warp_id % Mma::WarpCount::kM),
(tb_tile_offset.n() * Mma::WarpCount::kN) +
(warp_id / Mma::WarpCount::kM)});
iterator_C.store(accum);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product
template <
/// Threadblock-level matrix multiply-accumulate
typename MmaCore_,
/// Number of stages
int Stages = 2>
struct Testbed {
/// Threadblock-level GEMM implementation
using MmaCore = MmaCore_;
using ThreadblockShape = typename MmaCore::Shape;
using WarpShape = typename MmaCore::WarpShape;
using InstructionShape = typename MmaCore::InstructionShape;
using ElementA = typename MmaCore::ElementA;
using LayoutA = typename MmaCore::LayoutA;
using ElementB = typename MmaCore::ElementB;
using LayoutB = typename MmaCore::LayoutB;
using ElementC = typename MmaCore::ElementC;
using LayoutC = typename MmaCore::LayoutC;
static const int kStages = Stages;
// Define iterators over tiles from the A operand
static const bool use_idp4a = cutlass::platform::is_same<ElementA, int8_t>::value &&
cutlass::platform::is_same<ElementB, int8_t>::value &&
cutlass::platform::is_same<typename MmaCore::OperatorClass, cutlass::arch::OpClassSimt>::value;
static const bool transposeA = cutlass::platform::is_same< LayoutA, cutlass::layout::ColumnMajor >::value;
static const bool transposeB = cutlass::platform::is_same< LayoutB, cutlass::layout::RowMajor >::value;
using IteratorA = typename cutlass::platform::conditional< use_idp4a,
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA, transposeA> ,
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA, 1, typename MmaCore::IteratorThreadMapA>
>::type;
// Define iterators over tiles from the B operand
using IteratorB = typename cutlass::platform::conditional< use_idp4a,
cutlass::transform::threadblock::PredicatedTileIterator2dThreadTile<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB, transposeB> ,
cutlass::transform::threadblock::PredicatedTileIterator<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB, 0, typename MmaCore::IteratorThreadMapB>
>::type;
// Define MmaPipeline Single Stage
using MmaPipelineSingleStage = cutlass::gemm::threadblock::MmaSingleStage<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementC, LayoutC,
typename MmaCore::MmaPolicy>;
// Define MmaPipeline Two Stages
using MmaPipelineTwoStages = cutlass::gemm::threadblock::MmaPipelined<
typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
IteratorB, typename MmaCore::SmemIteratorB, ElementC, LayoutC,
typename MmaCore::MmaPolicy>;
// Define the threadblock-scoped pipelined matrix multiply (Select between Single vs. Two stages)
using Mma = typename cutlass::platform::conditional<(kStages==1), MmaPipelineSingleStage, MmaPipelineTwoStages>::type;
//
// Data members
//
cutlass::HostTensor<ElementA, LayoutA> matrix_A;
cutlass::HostTensor<ElementB, LayoutB> matrix_B;
cutlass::HostTensor<ElementC, LayoutC> matrix_C_computed;
cutlass::HostTensor<ElementC, LayoutC> matrix_C_reference;
cutlass::gemm::GemmCoord problem_size;
float alpha, beta;
//
// Methods
//
/// Allocates workspace in device memory
Testbed(int m, int n, int k, float alpha_, float beta_)
: problem_size(m, n, k), alpha(alpha_), beta(beta_) {
matrix_A.reset(cutlass::make_Coord(m, k));
matrix_B.reset(cutlass::make_Coord(k, n));
matrix_C_computed.reset(cutlass::make_Coord(m, n));
matrix_C_reference.reset(cutlass::make_Coord(m, n), false);
}
bool sufficient() {
return true;
}
/// Runs the test
bool run(
dim3 grid, dim3 block,
cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform) {
// Waive test if insufficient CUDA device
if (!sufficient()) {
if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
std::cerr << "Test waived due to insufficient CUDA device." << std::endl;
}
return true;
}
//
// initialize device memory
//
if (init_A == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementA>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_A.host_view(), seed, scope_max, scope_min, 0);
} else if (init_A == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_A.host_data(),
matrix_A.capacity());
} else if (init_A == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(matrix_A.host_view());
} else {
return false;
}
if (init_B == cutlass::Distribution::Uniform) {
int scope_max = 8;
int scope_min = -8;
if (cutlass::sizeof_bits<ElementB>::value == 4) {
scope_max = 2;
scope_min = -2;
} else if (cutlass::sizeof_bits<ElementB>::value == 1) {
scope_max = 2;
scope_min = 0;
}
uint64_t seed = 7;
cutlass::reference::host::TensorFillRandomUniform(
matrix_B.host_view(), seed + 16, scope_max, scope_min, 0);
} else if (init_B == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(matrix_B.host_data(),
matrix_B.capacity());
} else if (init_B == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(matrix_B.host_view());
} else {
return false;
}
cutlass::reference::host::TensorFill(matrix_C_computed.host_view());
cutlass::reference::host::TensorFill(matrix_C_reference.host_view());
matrix_A.sync_device();
matrix_B.sync_device();
matrix_C_computed.sync_device();
typename IteratorA::Params params_A(matrix_A.layout());
typename IteratorB::Params params_B(matrix_B.layout());
test::gemm::threadblock::kernel_mma<Mma><<<grid, block>>>(
problem_size, params_A, matrix_A.device_ref(), params_B,
matrix_B.device_ref(), matrix_C_computed.device_data(),
matrix_C_computed.layout().stride(0));
//
// Check error code
//
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess)
<< " kernel error: " << cudaGetErrorString(result) << " on device " << GetCudaDevice();
matrix_C_computed.sync_host();
cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
ElementC, LayoutC, ElementC, ElementC,
typename MmaCore::Operator>
reference_gemm;
reference_gemm(
problem_size, ElementC(alpha), matrix_A.host_view(),
matrix_B.host_view(), ElementC(beta), matrix_C_reference.host_view());
bool passed = cutlass::reference::host::TensorEquals(
matrix_C_computed.host_view(), matrix_C_reference.host_view());
EXPECT_TRUE(passed) << "Failed on device " << GetCudaDevice();
if (!passed) {
std::ofstream output("mma_pipelined_testbed_errors.txt");
output
<< "A:\n" << matrix_A.host_view() << "\n"
<< "B:\n" << matrix_B.host_view() << "\n"
<< "Reference:\n"
<< matrix_C_reference.host_view() << "\n"
<< "Computed:\n"
<< matrix_C_computed.host_view() << "\n";
}
return passed;
}
};
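/////////////////////////////////////////////////////////////////////////////////////////////////
// Hedged usage sketch (illustrative only; the shapes, layouts, and launch configuration are
// assumptions -- the real unit tests sweep many MmaCore configurations): drives this testbed with
// a SIMT core whose 64x64 threadblock tile is covered by four 32x32 warps, hence a 32x4 block.
inline bool example_run_simt_mma_pipelined() {
  using MmaCore = cutlass::gemm::threadblock::DefaultMmaCore<
      cutlass::gemm::GemmShape<64, 64, 8>,    // threadblock tile
      cutlass::gemm::GemmShape<32, 32, 8>,    // warp tile
      cutlass::gemm::GemmShape<1, 1, 1>,      // instruction shape (SIMT)
      float, cutlass::layout::ColumnMajor,    // A
      float, cutlass::layout::RowMajor,       // B
      float, cutlass::layout::ColumnMajor,    // C
      cutlass::arch::OpClassSimt>;
  dim3 grid(1, 1);       // the problem below spans exactly one 64x64 threadblock tile
  dim3 block(32, 4, 1);  // 32 lanes per warp x 4 warps
  return Testbed<MmaCore>(64, 64, 16, /*alpha_=*/1.0f, /*beta_=*/0.0f).run(grid, block);
}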
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace test
| test/unit/gemm/threadblock/mma_pipelined_testbed.h/0 | {
"file_path": "test/unit/gemm/threadblock/mma_pipelined_testbed.h",
"repo_id": "test",
"token_count": 5385
} | 54 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Device kernel used by the NVRTC unit tests for thread-level GEMM
*/
#pragma once
#include "cutlass/array.h"
namespace test {
namespace nvrtc {
namespace kernel {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Thread-level matrix multiply-accumulate
template <typename Mma>
__global__ void testbed_kernel(
typename Mma::ElementC *D,
typename Mma::ElementA const *A,
typename Mma::ElementB const *B,
typename Mma::ElementC const *C) {
auto ptr_D = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> *>(D);
auto ptr_A = reinterpret_cast<cutlass::Array<typename Mma::ElementA, Mma::Shape::kMK> const *>(A);
auto ptr_B = reinterpret_cast<cutlass::Array<typename Mma::ElementB, Mma::Shape::kKN> const *>(B);
auto ptr_C = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> const *>(C);
Mma mma;
auto a = *ptr_A;
auto b = *ptr_B;
auto c = *ptr_C;
cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> d;
mma(d, a, b, c);
*ptr_D = d;
}
}
}
}
}
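/////////////////////////////////////////////////////////////////////////////////////////////////
// Hedged sketch (not part of the test harness; the NVRTC options, architecture string, and error
// handling are simplified assumptions): the host side of the NVRTC tests compiles a translation
// unit that instantiates testbed_kernel<Mma> into PTX roughly as follows.
#include <nvrtc.h>
#include <string>
inline bool example_compile_kernel_to_ptx(char const *source, std::string &ptx) {
  nvrtcProgram program;
  if (nvrtcCreateProgram(&program, source, "testbed_kernel.cu", 0, nullptr, nullptr) != NVRTC_SUCCESS) {
    return false;
  }
  char const *options[] = { "--gpu-architecture=compute_80" };
  bool success = (nvrtcCompileProgram(program, 1, options) == NVRTC_SUCCESS);
  if (success) {
    size_t ptx_size = 0;
    nvrtcGetPTXSize(program, &ptx_size);
    ptx.resize(ptx_size);
    nvrtcGetPTX(program, &ptx[0]);
  }
  nvrtcDestroyProgram(&program);
  return success;
}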
| test/unit/nvrtc/kernel/thread/testbed_kernel.h/0 | {
"file_path": "test/unit/nvrtc/kernel/thread/testbed_kernel.h",
"repo_id": "test",
"token_count": 861
} | 55 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Layout type identifier
enum class LayoutTypeID {
kUnknown,
kColumnMajor,
kRowMajor,
kColumnMajorInterleavedK2,
kRowMajorInterleavedK2,
kColumnMajorInterleavedK4,
kRowMajorInterleavedK4,
kColumnMajorInterleavedK16,
kRowMajorInterleavedK16,
kColumnMajorInterleavedK32,
kRowMajorInterleavedK32,
kColumnMajorInterleavedK64,
kRowMajorInterleavedK64,
kTensorNCHW,
kTensorNCDHW,
kTensorNHWC,
kTensorNDHWC,
kTensorNC32HW32,
kTensorC32RSK32,
kTensorNC64HW64,
kTensorC64RSK64,
kInvalid
};
/// Numeric data type
enum class NumericTypeID {
kUnknown,
kVoid,
kB1,
kU2,
kU4,
kU8,
kU16,
kU32,
kU64,
kS2,
kS4,
kS8,
kS16,
kS32,
kS64,
kFE4M3,
kFE5M2,
kF16,
kBF16,
kTF32,
kF32,
kF64,
kCF16,
kCBF16,
kCF32,
kCTF32,
kCF64,
kCS2,
kCS4,
kCS8,
kCS16,
kCS32,
kCS64,
kCU2,
kCU4,
kCU8,
kCU16,
kCU32,
kCU64,
kInvalid
};
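// Hedged sketch (hypothetical helper, not part of the library; the library's own numeric traits
// live elsewhere): client code commonly dispatches on NumericTypeID like this. Only a few
// enumerants are covered for illustration.
inline int example_sizeof_bits(NumericTypeID type) {
  switch (type) {
    case NumericTypeID::kB1:   return 1;
    case NumericTypeID::kS8:   return 8;
    case NumericTypeID::kF16:  return 16;
    case NumericTypeID::kBF16: return 16;
    case NumericTypeID::kTF32: return 32;  // tf32 occupies a 32-bit container
    case NumericTypeID::kF32:  return 32;
    case NumericTypeID::kF64:  return 64;
    default:                   return 0;   // not covered by this sketch
  }
}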
/// Enumerated type describing a transformation on a complex value.
enum class ComplexTransform {
kNone,
kConjugate,
kInvalid
};
/// Providers
enum class Provider {
kNone,
kCUTLASS,
kReferenceHost,
kReferenceDevice,
kCUBLAS,
kCUDNN,
kInvalid
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumeration indicating the kind of operation
enum class OperationKind {
kGemm,
kRankK,
kRank2K,
kTrmm,
kSymm,
kConv2d,
kConv3d,
kEqGemm,
kSparseGemm,
kReduction,
kInvalid
};
/// Enumeration indicating whether scalars are in host or device memory
enum class ScalarPointerMode {
kHost,
kDevice,
kInvalid
};
/// Describes how reductions are performed across threadblocks
enum class SplitKMode {
kNone,
kSerial,
kParallel,
kParallelSerial,
kInvalid
};
/// Indicates the classification of the math instruction
enum class OpcodeClassID {
kSimt,
kTensorOp,
kWmmaTensorOp,
kSparseTensorOp,
kInvalid
};
enum class MathOperationID {
kAdd,
kMultiplyAdd,
kMultiplyAddSaturate,
kMultiplyAddMixedInputUpcast,
kMultiplyAddFastBF16,
kMultiplyAddFastF16,
kMultiplyAddFastF32,
kMultiplyAddComplex,
kMultiplyAddComplexFastF32,
kMultiplyAddGaussianComplex,
kXorPopc,
kInvalid
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumeration indicating what kind of GEMM operation to perform
enum class GemmKind {
kGemm,
kSparse,
kUniversal,
kPlanarComplex,
kPlanarComplexArray,
kGrouped,
kInvalid
};
/// Enumeration indicating what kind of RankK update operation to perform
enum class RankKKind {
kUniversal,
kInvalid
};
/// Enumeration indicating what kind of TRMM operation to perform
enum class TrmmKind {
kUniversal,
kInvalid
};
/// Enumeration indicating what kind of SYMM/HEMM operation to perform
enum class SymmKind {
kUniversal,
kInvalid
};
/// Enumeration indicating what kind of Conv2d operation to perform
enum class ConvKind {
kUnknown,
kFprop,
kDgrad,
kWgrad,
kInvalid
};
enum class ConvModeID {
kCrossCorrelation,
kConvolution,
kInvalid
};
// Iterator algorithm enum in order of general performance-efficiency
enum class IteratorAlgorithmID {
kNone,
kAnalytic,
kOptimized,
kFixedChannels,
kFewChannels,
kInvalid
};
enum class EpilogueKind {
kUnknown,
kConversion,
kLinearCombination,
kLinearCombinationClamp,
kLinearCombinationPlanarComplex,
kLinearCombinationRelu,
kLinearCombinationSigmoid,
kInvalid
};
enum class RasterOrder {
kAlongN,
kAlongM,
kHeuristic,
kInvalid
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/include/cutlass/library/types.h/0 | {
"file_path": "tools/library/include/cutlass/library/types.h",
"repo_id": "tools",
"token_count": 1897
} | 56 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
    \brief Instantiates conv2d reference operations and registers them with the library manifest
*/
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "conv_reference_operation.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_conv2d_reference_operations(Manifest &manifest) {
make_conv_all<
2,
cutlass::half_t, cutlass::layout::TensorNHWC,
cutlass::half_t, cutlass::layout::TensorNHWC,
cutlass::half_t, cutlass::layout::TensorNHWC,
cutlass::half_t,
cutlass::half_t
>(manifest);
make_conv_all<
2,
cutlass::half_t, cutlass::layout::TensorNHWC,
cutlass::half_t, cutlass::layout::TensorNHWC,
cutlass::half_t, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
cutlass::half_t, cutlass::layout::TensorNHWC,
cutlass::half_t, cutlass::layout::TensorNHWC,
float, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
cutlass::bfloat16_t, cutlass::layout::TensorNHWC,
float, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
cutlass::tfloat32_t, cutlass::layout::TensorNHWC,
float, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
float, cutlass::layout::TensorNHWC,
float, cutlass::layout::TensorNHWC,
float, cutlass::layout::TensorNHWC,
float,
float
>(manifest);
make_conv_all<
2,
cutlass::complex<float>, cutlass::layout::TensorNHWC,
cutlass::complex<float>, cutlass::layout::TensorNHWC,
cutlass::complex<float>, cutlass::layout::TensorNHWC,
cutlass::complex<float>,
cutlass::complex<float>
>(manifest);
make_conv_fprop<
2,
int8_t, cutlass::layout::TensorNHWC,
int8_t, cutlass::layout::TensorNHWC,
int32_t, cutlass::layout::TensorNHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
2,
int8_t, cutlass::layout::TensorNHWC,
int8_t, cutlass::layout::TensorNHWC,
int8_t, cutlass::layout::TensorNHWC,
float,
int32_t,
NumericConverterClamp<int8_t, float>
>(manifest);
make_conv_fprop<
2,
uint8_t, cutlass::layout::TensorNHWC,
uint8_t, cutlass::layout::TensorNHWC,
uint8_t, cutlass::layout::TensorNHWC,
float,
int32_t,
NumericConverterClamp<uint8_t, float>
>(manifest);
make_conv_fprop<
2,
uint8_t, cutlass::layout::TensorNHWC,
uint8_t, cutlass::layout::TensorNHWC,
int32_t, cutlass::layout::TensorNHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
2,
uint8_t, cutlass::layout::TensorNHWC,
uint8_t, cutlass::layout::TensorNHWC,
int8_t, cutlass::layout::TensorNHWC,
float,
int32_t,
NumericConverterClamp<int8_t, float>
>(manifest);
make_conv_fprop<
2,
cutlass::int4b_t, cutlass::layout::TensorNHWC,
cutlass::int4b_t, cutlass::layout::TensorNHWC,
int32_t, cutlass::layout::TensorNHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
2,
cutlass::int4b_t, cutlass::layout::TensorNHWC,
cutlass::int4b_t, cutlass::layout::TensorNHWC,
cutlass::int4b_t, cutlass::layout::TensorNHWC,
float,
int32_t,
NumericConverterClamp<cutlass::int4b_t, float>
>(manifest);
make_conv_fprop<
2,
cutlass::uint4b_t, cutlass::layout::TensorNHWC,
cutlass::uint4b_t, cutlass::layout::TensorNHWC,
int32_t, cutlass::layout::TensorNHWC,
int32_t,
int32_t,
NumericConverterClamp<int32_t, int32_t>
>(manifest);
make_conv_fprop<
2,
cutlass::uint4b_t, cutlass::layout::TensorNHWC,
cutlass::uint4b_t, cutlass::layout::TensorNHWC,
cutlass::uint4b_t, cutlass::layout::TensorNHWC,
float,
int32_t,
NumericConverterClamp<cutlass::uint4b_t, float>
>(manifest);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
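// Hedged usage sketch (hypothetical call site, not part of the library build): the library /
// profiler initialization path owns a Manifest and invokes the function above so that these
// host-side conv2d reference kernels can be looked up alongside the device kernels.
namespace {
inline void example_register_conv2d_references() {
  Manifest manifest;
  initialize_conv2d_reference_operations(manifest);
  // A real caller retains the manifest and queries it later; it is discarded here because this
  // sketch only illustrates the call.
}
} // anonymous namespace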
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/library/src/reference/conv2d.cu/0 | {
"file_path": "tools/library/src/reference/conv2d.cu",
"repo_id": "tools",
"token_count": 2557
} | 57 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Provides several functions for filling tensors with data.
*/
#pragma once
#include <string>
#include <vector>
#include <map>
#include <iostream>
#include "cutlass/library/library.h"
#define TRACE(x) { std::cout << __FILE__ << ":" << __LINE__ << " " << x << std::endl; }
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
T from_string(std::string const &);
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumerated type describing how the performance testbench evaluates kernels.
enum class ExecutionMode {
kProfile, ///< regular verification and profiling
kDryRun, ///< no kernels are launched or workspaces allocated; used to assess what operators might be launched
kEnumerate, ///< no kernels launched or workspaces allocated; lists all operation kind and operations
kTrace, ///< executes a single device-side computation with no other kernel launches
kInvalid
};
/// Converts an ExecutionMode enumerant to a string
char const *to_string(ExecutionMode mode, bool pretty = false);
/// Parses an ExecutionMode enumerant from a string
template <>
ExecutionMode from_string<ExecutionMode>(std::string const &str);
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Library algorithm mode
enum class AlgorithmMode {
kMatching, ///< compare against best matching algorithm
kBest, ///< evaluate all library algorithms and report best
kDefault, ///< use the library's default algorithm option
kInvalid
};
/// Converts an AlgorithmMode enumerant to a string
char const *to_string(AlgorithmMode mode, bool pretty = false);
/// Parses an AlgorithmMode enumerant from a string
template <>
AlgorithmMode from_string<AlgorithmMode>(std::string const &str);
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Outcome of a performance test
enum class Disposition {
kPassed,
kFailed,
kNotRun,
kIncorrect,
kNotVerified,
kInvalidProblem,
kNotSupported,
kInvalid
};
/// Converts a Disposition enumerant to a string
char const *to_string(Disposition disposition, bool pretty = false);
/// Parses a Disposition enumerant from a string
template <>
Disposition from_string<Disposition>(std::string const &str);
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Indicates when to save
enum class SaveWorkspace {
kNever,
kIncorrect,
kAlways,
kInvalid
};
/// Converts a SaveWorkspace enumerant to a string
char const *to_string(SaveWorkspace save_option, bool pretty = false);
/// Parses a SaveWorkspace enumerant from a string
template <>
SaveWorkspace from_string<SaveWorkspace>(std::string const &str);
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Indicates the type of kernel argument
// An argument may hold either a scalar value or a numeric type identifier, hence the distinction below:
// 1) Scalar: the argument is a scalar of some numeric type, e.g. u32.
//      Its C++ equivalent as "type name = initializer" is "u32 m = 32"
// 2) Numeric: the argument's value is itself a type identifier, e.g. a NumericTypeID.
//      Its C++ equivalent as "type name = initializer" is "NumericTypeID numeric_type = u32"
enum class ArgumentTypeID {
kScalar,
kInteger,
kTensor,
kBatchedTensor,
kStructure,
kEnumerated,
kInvalid
};
/// Converts an ArgumentTypeID enumerant to a string
char const *to_string(ArgumentTypeID type, bool pretty = false);
/// Parses an ArgumentTypeID enumerant from a string
template <>
ArgumentTypeID from_string<ArgumentTypeID>(std::string const &str);
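// Illustrative round-trip of the to_string/from_string pair declared above (sketch only; the
// exact token spellings are defined in the corresponding .cpp and are assumptions here):
//
//   ExecutionMode mode = from_string<ExecutionMode>("profile");
//   if (mode != ExecutionMode::kInvalid) {
//     std::cout << to_string(mode, /*pretty=*/true) << std::endl;
//   }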
/////////////////////////////////////////////////////////////////////////////////////////////////
// Profiler typedefs
using ProviderVector = std::vector<library::Provider>;
using DispositionMap = std::map<library::Provider, Disposition>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Print vector for the report
template <typename T>
std::ostream& operator<< (std::ostream& out, const std::vector<T>& v) {
for (size_t i = 0; i < v.size(); ++i) {
out << to_string(v[i], true) << (i + 1u != v.size() ? "," : "");
}
return out;
}
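// Illustrative use of the stream operator above (assumes, as for library::Provider, that a
// to_string(T, bool) overload is visible for the element type):
//
//   ProviderVector providers = {library::Provider::kCUTLASS, library::Provider::kCUBLAS};
//   std::cout << providers << std::endl;   // prints the providers as a comma-separated list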
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
| tools/profiler/include/cutlass/profiler/enumerated_types.h/0 | {
"file_path": "tools/profiler/include/cutlass/profiler/enumerated_types.h",
"repo_id": "tools",
"token_count": 1677
} | 58 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Helper functions for mapping CUTLASS concepts to cuBLAS.
*/
#include <stdexcept>
#if CUTLASS_ENABLE_CUBLAS
#include "cutlass/profiler/cublas_helpers.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Converts a cuBLAS status to cutlass::Status
Status get_cutlass_status(cublasStatus_t cublas) {
switch (cublas) {
case CUBLAS_STATUS_SUCCESS:
return Status::kSuccess;
case CUBLAS_STATUS_INVALID_VALUE:
return Status::kErrorInvalidProblem;
case CUBLAS_STATUS_NOT_SUPPORTED:
return Status::kErrorNotSupported;
default: break;
}
return Status::kErrorInternal;
}
/// Converts a cuBLAS status to cutlass::profiler::Disposition
Disposition get_cutlass_disposition(cublasStatus_t cublas_status) {
if (cublas_status == CUBLAS_STATUS_INVALID_VALUE) {
return Disposition::kInvalidProblem;
}
else if (cublas_status == CUBLAS_STATUS_NOT_SUPPORTED) {
return Disposition::kNotSupported;
}
return Disposition::kFailed;
}
/// Maps a CUTLASS tensor layout to a cuBLAS transpose operation
bool get_cublas_transpose_operation(
cublasOperation_t &operation,
library::LayoutTypeID layout,
library::ComplexTransform transform) {
switch (layout) {
case library::LayoutTypeID::kColumnMajor:
if (transform == library::ComplexTransform::kNone) {
operation = CUBLAS_OP_N;
return true;
}
else {
return false;
}
break;
case library::LayoutTypeID::kRowMajor:
if (transform == library::ComplexTransform::kNone) {
operation = CUBLAS_OP_T;
return true;
}
else if (transform == library::ComplexTransform::kConjugate) {
operation = CUBLAS_OP_C;
return true;
}
break;
default: break;
}
return false;
}
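// Illustrative call (not part of the original source): a row-major CUTLASS operand with no
// complex transform maps onto the cuBLAS transpose flag as follows.
//
//   cublasOperation_t trans;
//   bool ok = get_cublas_transpose_operation(
//       trans, library::LayoutTypeID::kRowMajor, library::ComplexTransform::kNone);
//   // ok == true, trans == CUBLAS_OP_T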
/// Maps a CUTLASS numeric type to a cuBLAS data type enumeration
bool get_cublas_datatype(cublasDataType_t &data_type, library::NumericTypeID element_type) {
switch (element_type) {
case library::NumericTypeID::kFE4M3:
#if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))
data_type = CUDA_R_8F_E4M3;
return true;
#endif
break;
case library::NumericTypeID::kFE5M2:
#if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))
data_type = CUDA_R_8F_E5M2;
return true;
#endif
break;
case library::NumericTypeID::kF16:
data_type = CUDA_R_16F;
return true;
case library::NumericTypeID::kBF16:
data_type = CUDA_R_16BF;
return true;
case library::NumericTypeID::kTF32:
break;
case library::NumericTypeID::kF32:
data_type = CUDA_R_32F;
return true;
case library::NumericTypeID::kF64:
data_type = CUDA_R_64F;
return true;
case library::NumericTypeID::kS4:
break;
case library::NumericTypeID::kS8:
data_type = CUDA_R_8I;
return true;
case library::NumericTypeID::kS16:
break;
case library::NumericTypeID::kS32:
data_type = CUDA_R_32I;
return true;
case library::NumericTypeID::kS64:
break;
case library::NumericTypeID::kU4:
break;
case library::NumericTypeID::kU8:
data_type = CUDA_R_8U;
return true;
case library::NumericTypeID::kU16:
break;
case library::NumericTypeID::kU32:
data_type = CUDA_R_32U;
return true;
case library::NumericTypeID::kU64:
break;
case library::NumericTypeID::kB1:
break;
case library::NumericTypeID::kCF32:
data_type = CUDA_C_32F;
return true;
case library::NumericTypeID::kCF64:
data_type = CUDA_C_64F;
return true;
case library::NumericTypeID::kInvalid:
default:
break;
}
return false;
}
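// Illustrative calls (not part of the original source): element types without a cuBLAS
// equivalent (e.g. kTF32, kS4) leave the output unchanged and return false.
//
//   cublasDataType_t dt;
//   bool has_f16 = get_cublas_datatype(dt, library::NumericTypeID::kF16);  // true, dt == CUDA_R_16F
//   bool has_s4  = get_cublas_datatype(dt, library::NumericTypeID::kS4);   // false, dt unchanged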
/// Maps a cutlass::SideMode to cuBLAS side mode
bool get_cublas_side_mode(cublasSideMode_t& side, SideMode side_mode) {
switch (side_mode) {
case SideMode::kLeft:
side = CUBLAS_SIDE_LEFT;
return true;
case SideMode::kRight:
side = CUBLAS_SIDE_RIGHT;
return true;
default: break;
}
return false;
}
/// Maps a cutlass::FillMode to cuBLAS fill mode
bool get_cublas_fill_mode(cublasFillMode_t& uplo, FillMode fill_mode) {
switch (fill_mode) {
case FillMode::kLower:
uplo = CUBLAS_FILL_MODE_LOWER;
return true;
case FillMode::kUpper:
uplo = CUBLAS_FILL_MODE_UPPER;
return true;
default: break;
}
return false;
}
/// Maps a cutlass::DiagType to cuBLAS diag type
bool get_cublas_diag_type(cublasDiagType_t& diag, DiagType diag_type) {
switch (diag_type) {
case DiagType::kNonUnit:
diag = CUBLAS_DIAG_NON_UNIT;
return true;
case DiagType::kUnit:
diag = CUBLAS_DIAG_UNIT;
return true;
default: break;
}
return false;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Gets the cublas algorithm given threadblock tile dimensions and math opcode class
cublasGemmAlgo_t get_cublas_gemm_algo(int cta_m, int cta_n, int cta_k, library::OpcodeClassID opcode_class) {
return (opcode_class == library::OpcodeClassID::kSimt ?
CUBLAS_GEMM_DEFAULT : CUBLAS_GEMM_DEFAULT_TENSOR_OP);
}
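// Illustrative call (not part of the original source): the threadblock extents are currently
// ignored; only the opcode class selects between the default and tensor-op algorithms.
//
//   cublasGemmAlgo_t algo =
//       get_cublas_gemm_algo(128, 128, 32, library::OpcodeClassID::kTensorOp);
//   // algo == CUBLAS_GEMM_DEFAULT_TENSOR_OP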
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular GEMM description
Status cublas_satisfies(library::GemmDescription const &desc) {
auto const &math_instruction = desc.tile_description.math_instruction;
if (math_instruction.element_accumulator == library::NumericTypeID::kS32 &&
math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) {
return Status::kErrorNotSupported;
}
// output type S4 and S8 not supported in cuBLAS
if (desc.C.element == library::NumericTypeID::kS4 ||
desc.C.element == library::NumericTypeID::kS8) {
return Status::kErrorNotSupported;
}
// input type BF16 and TF32 not supported in cuBLAS
if (desc.A.element == library::NumericTypeID::kBF16 ||
desc.A.element == library::NumericTypeID::kTF32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
cublasGemmExDispatcher::cublasGemmExDispatcher(
library::GemmDescription const &op_desc,
library::GemmUniversalConfiguration configuration_,
library::GemmUniversalArguments arguments_,
cublasGemmAlgo_t algorithm
):
configuration(configuration_), arguments(arguments_), algo(algorithm), status(Status::kSuccess) {
bool good = true;
good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
good = (good && get_cublas_transpose_operation(trans_B, op_desc.B.layout, op_desc.transform_B));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_B, op_desc.B.element));
good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
op_desc.B.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
bool const isPedantic = false;
switch (compute_data_type) {
case CUDA_R_32F:
case CUDA_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case CUDA_R_64F:
case CUDA_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case CUDA_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case CUDA_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
/// Executes GEMM using these arguments
cublasStatus_t cublasGemmExDispatcher::operator()(cublasHandle_t handle) {
if (configuration.mode == library::GemmUniversalMode::kBatched) {
return cublasGemmStridedBatchedEx(
handle,
trans_A,
trans_B,
configuration.problem_size.m(),
configuration.problem_size.n(),
configuration.problem_size.k(),
arguments.alpha,
arguments.A,
data_type_A,
int(configuration.lda),
arguments.batch_stride_A,
arguments.B,
data_type_B,
int(configuration.ldb),
arguments.batch_stride_B,
arguments.beta,
arguments.D,
data_type_C,
int(configuration.ldc),
arguments.batch_stride_C,
configuration.batch_count,
#if (__CUDACC_VER_MAJOR__ >= 11)
compute_type,
#else
compute_data_type,
#endif
algo
);
}
else {
return cublasGemmEx(
handle,
trans_A,
trans_B,
configuration.problem_size.m(),
configuration.problem_size.n(),
configuration.problem_size.k(),
arguments.alpha,
arguments.A,
data_type_A,
int(configuration.lda),
arguments.B,
data_type_B,
int(configuration.ldb),
arguments.beta,
arguments.D,
data_type_C,
int(configuration.ldc),
#if (__CUDACC_VER_MAJOR__ >= 11)
compute_type,
#else
compute_data_type,
#endif
algo
);
}
}
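// Illustrative use of the dispatcher above (sketch only; the descriptor, configuration, and
// arguments are assumed to have been populated by the GEMM operation profiler):
//
//   detail::cublasGemmExDispatcher gemm_op(desc, configuration, arguments, CUBLAS_GEMM_DEFAULT);
//   if (gemm_op.status == Status::kSuccess) {
//     cublasStatus_t s = gemm_op(handle);  // dispatches cublasGemmEx or cublasGemmStridedBatchedEx
//   }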
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular RankK description
Status cublas_satisfies(library::RankKDescription const &desc) {
auto const &math_instruction = desc.tile_description.math_instruction;
if (math_instruction.element_accumulator == library::NumericTypeID::kS32 &&
math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) {
return Status::kErrorNotSupported;
}
// output type S4 and S8 not supported in cuBLAS
if (desc.C.element == library::NumericTypeID::kS4 ||
desc.C.element == library::NumericTypeID::kS8) {
return Status::kErrorNotSupported;
}
// input type BF16 and TF32 not supported in cuBLAS
if (desc.A.element == library::NumericTypeID::kBF16 ||
desc.A.element == library::NumericTypeID::kTF32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
cublasRankKDispatcher::cublasRankKDispatcher(
library::RankKDescription const &op_desc,
library::RankKConfiguration configuration_,
library::RankKArguments arguments_
):
configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
blas_mode = op_desc.blas_mode;
num_ranks = op_desc.num_ranks;
bool good = true;
good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
bool const isPedantic = false;
switch (compute_data_type) {
case CUDA_R_32F:
case CUDA_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case CUDA_R_64F:
case CUDA_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case CUDA_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case CUDA_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
/// Executes RankK using these arguments
cublasStatus_t cublasRankKDispatcher::operator()(cublasHandle_t handle) {
// SYRK and HERK
if (num_ranks == 1) {
if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) {
return cublasDsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.beta),
static_cast<double*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasSsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.beta),
static_cast<float*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) {
if (blas_mode == BlasMode::kHermitian) {
return cublasZherk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const double*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasZsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
if (blas_mode == BlasMode::kHermitian) {
return cublasCherk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const float*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasCsyrk(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
// SYR2K and HER2K
else if (num_ranks == 2) {
if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) {
return cublasDsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.B),
int(configuration.ldb),
static_cast<const double*>(arguments.beta),
static_cast<double*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasSsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.B),
int(configuration.ldb),
static_cast<const float*>(arguments.beta),
static_cast<float*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) {
if (blas_mode == BlasMode::kHermitian) {
return cublasZher2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const double*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasZsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuDoubleComplex*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
if (blas_mode == BlasMode::kHermitian) {
return cublasCher2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const float*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasCsyr2k(
handle,
uplo,
trans_A,
configuration.problem_size.n(),
configuration.problem_size.k(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuComplex*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
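// Illustrative use (sketch only): num_ranks comes from the operation description, so the same
// dispatcher covers SYRK/HERK (num_ranks == 1) and SYR2K/HER2K (num_ranks == 2).
//
//   detail::cublasRankKDispatcher rank_k_op(desc, configuration, arguments);
//   if (rank_k_op.status == Status::kSuccess) {
//     cublasStatus_t s = rank_k_op(handle);
//   }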
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular TRMM description
Status cublas_satisfies(library::TrmmDescription const &desc) {
auto const &math_instruction = desc.tile_description.math_instruction;
if (math_instruction.element_accumulator == library::NumericTypeID::kS32 &&
math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) {
return Status::kErrorNotSupported;
}
// output type S4 and S8 not supported in cuBLAS
if (desc.D.element == library::NumericTypeID::kS4 ||
desc.D.element == library::NumericTypeID::kS8) {
return Status::kErrorNotSupported;
}
// input type BF16 and TF32 not supported in cuBLAS
if (desc.A.element == library::NumericTypeID::kBF16 ||
desc.A.element == library::NumericTypeID::kTF32) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
cublasTrmmDispatcher::cublasTrmmDispatcher(
library::TrmmDescription const &op_desc,
library::TrmmConfiguration configuration_,
library::TrmmArguments arguments_
):
configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
bool good = true;
good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A));
good = (good && get_cublas_side_mode(side, op_desc.side_mode));
good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
good = (good && get_cublas_diag_type(diag, op_desc.diag_type));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_B, op_desc.B.element));
good = (good && get_cublas_datatype(data_type_D, op_desc.D.element));
// If A is transposed, cuBLAS sees the opposite fill mode, so invert it here.
if (trans_A == CUBLAS_OP_T || trans_A == CUBLAS_OP_C) {
if (uplo == CUBLAS_FILL_MODE_LOWER)
uplo = CUBLAS_FILL_MODE_UPPER;
else
uplo = CUBLAS_FILL_MODE_LOWER;
}
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
bool const isPedantic = false;
switch (compute_data_type) {
case CUDA_R_32F:
case CUDA_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case CUDA_R_64F:
case CUDA_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case CUDA_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case CUDA_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
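// Concrete trace of the fill-mode inversion above (illustrative values, not from the
// original source):
//
//   // op_desc.fill_mode == FillMode::kLower and trans_A == CUBLAS_OP_T
//   // => uplo is rewritten from CUBLAS_FILL_MODE_LOWER to CUBLAS_FILL_MODE_UPPER
//   //    before the cublas<t>trmm call below.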
/// Executes TRMM using these arguments
cublasStatus_t cublasTrmmDispatcher::operator()(cublasHandle_t handle) {
if (data_type_A == data_type_D && data_type_A == CUDA_R_64F) {
return cublasDtrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.B),
int(configuration.ldb),
static_cast<double*>(arguments.D),
int(configuration.ldd)
);
} else if (data_type_A == data_type_D && data_type_A == CUDA_R_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasStrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.B),
int(configuration.ldb),
static_cast<float*>(arguments.D),
int(configuration.ldd)
);
} else if (data_type_A == data_type_D && data_type_A == CUDA_C_64F) {
return cublasZtrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldd)
);
} else if (data_type_A == data_type_D && data_type_A == CUDA_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasCtrmm(
handle,
side,
uplo,
trans_A,
diag,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldd)
);
} else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a status if cuBLAS can satisfy a particular Symm description
Status cublas_satisfies(library::SymmDescription const &desc) {
auto const &math_instruction = desc.tile_description.math_instruction;
if (math_instruction.element_accumulator == library::NumericTypeID::kS32 &&
math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) {
return Status::kErrorNotSupported;
}
// output type S4 and S8 not supported in cuBLAS
if (desc.C.element == library::NumericTypeID::kS4 ||
desc.C.element == library::NumericTypeID::kS8) {
return Status::kErrorNotSupported;
}
// input type BF16 and TF32 not supported in cuBLAS
if (desc.A.element == library::NumericTypeID::kBF16 ||
desc.A.element == library::NumericTypeID::kTF32) {
return Status::kErrorNotSupported;
}
// input type BF16 and TF32 not supported in cuBLAS
if (desc.B.element == library::NumericTypeID::kBF16 ||
desc.B.element == library::NumericTypeID::kTF32) {
return Status::kErrorNotSupported;
}
// only column major layout is supported in cuBLAS
if (desc.A.layout != library::LayoutTypeID::kColumnMajor ||
desc.transform_A != library::ComplexTransform::kNone) {
return Status::kErrorNotSupported;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
cublasSymmDispatcher::cublasSymmDispatcher(
library::SymmDescription const &op_desc,
library::SymmConfiguration configuration_,
library::SymmArguments arguments_
):
configuration(configuration_), arguments(arguments_), status(Status::kSuccess) {
blas_mode = op_desc.blas_mode;
bool good = true;
good = (good && get_cublas_side_mode(side, op_desc.side_mode));
good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode));
good = (good && get_cublas_datatype(data_type_A, op_desc.A.element));
good = (good && get_cublas_datatype(data_type_C, op_desc.C.element));
good = (good && get_cublas_datatype(
compute_data_type,
op_desc.tile_description.math_instruction.element_accumulator));
// cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe
// internal numerical data types used in the computation.
#if (__CUDACC_VER_MAJOR__ >= 11)
library::OpcodeClassID const & opcode_class =
op_desc.tile_description.math_instruction.opcode_class;
if (good &&
op_desc.A.element == library::NumericTypeID::kF32 &&
opcode_class == library::OpcodeClassID::kTensorOp) {
compute_type = CUBLAS_COMPUTE_32F_FAST_TF32;
}
else if (good) {
bool const isPedantic = false;
switch (compute_data_type) {
case CUDA_R_32F:
case CUDA_C_32F:
compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F;
break;
case CUDA_R_64F:
case CUDA_C_64F:
compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F;
break;
case CUDA_R_16F:
compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F;
break;
case CUDA_R_32I:
compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I;
break;
default:
good = false;
break;
}
}
#endif // __CUDACC_VER_MAJOR__ >= 11
if (!good) {
status = Status::kErrorNotSupported;
}
}
/// Executes Symm using these arguments
cublasStatus_t cublasSymmDispatcher::operator()(cublasHandle_t handle) {
// SYMM and HEMM
if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) {
return cublasDsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const double*>(arguments.alpha),
static_cast<const double*>(arguments.A),
int(configuration.lda),
static_cast<const double*>(arguments.B),
int(configuration.ldb),
static_cast<const double*>(arguments.beta),
static_cast<double*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
return cublasSsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const float*>(arguments.alpha),
static_cast<const float*>(arguments.A),
int(configuration.lda),
static_cast<const float*>(arguments.B),
int(configuration.ldb),
static_cast<const float*>(arguments.beta),
static_cast<float*>(arguments.D),
int(configuration.ldc)
);
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) {
if (blas_mode == BlasMode::kHermitian) {
return cublasZhemm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuDoubleComplex*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasZsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuDoubleComplex*>(arguments.alpha),
static_cast<const cuDoubleComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuDoubleComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuDoubleComplex*>(arguments.beta),
static_cast<cuDoubleComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) {
#if (__CUDACC_VER_MAJOR__ >= 11)
if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS)
return CUBLAS_STATUS_NOT_SUPPORTED;
#endif
if (blas_mode == BlasMode::kHermitian) {
return cublasChemm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuComplex*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
else {
return cublasCsymm(
handle,
side,
uplo,
configuration.problem_size.m(),
configuration.problem_size.n(),
static_cast<const cuComplex*>(arguments.alpha),
static_cast<const cuComplex*>(arguments.A),
int(configuration.lda),
static_cast<const cuComplex*>(arguments.B),
int(configuration.ldb),
static_cast<const cuComplex*>(arguments.beta),
static_cast<cuComplex*>(arguments.D),
int(configuration.ldc)
);
}
} else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
#endif // #if CUTLASS_ENABLE_CUBLAS
| tools/profiler/src/cublas_helpers.cu/0 | {
"file_path": "tools/profiler/src/cublas_helpers.cu",
"repo_id": "tools",
"token_count": 15733
} | 59 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/profiler/cublas_helpers.h"
#include "cutlass/profiler/sparse_gemm_operation_profiler.h"
#include "cutlass/profiler/gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
SparseGemmOperationProfiler::SparseGemmOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kSparseGemm,
{
{ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (e.g. sparse, ...)"},
{ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
{ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
{ArgumentTypeID::kTensor, {"E"}, "Tensor storing the E operand"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kInteger, {"split_k_slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count"}, "Number of GEMMs computed in one batch"},
}
) {
description_ = " Structured sparse GEMM. D = alpha * A*B + beta * C";
}
/// Destructor
SparseGemmOperationProfiler::~SparseGemmOperationProfiler() {
}
/// Prints usage statement for the math function
void SparseGemmOperationProfiler::print_usage(std::ostream &out) const {
out << "Sparse GEMM" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void SparseGemmOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size:\n"
<< " $ cutlass_profiler --operation=SparseGemm --m=1024 --n=1024 --k=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=SparseGemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=SparseGemm --accumulator-type=f16,f32\n\n"
<< "Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
<< " $ cutlass_profiler --operation=SparseGemm --A=f16:column --B=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=SparseGemm --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=SparseGemm --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=SparseGemm --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=SparseGemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=SparseGemm \\ \n"
<< " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Status SparseGemmOperationProfiler::SparseGemmProblem::parse(
library::SparseGemmDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!arg_as_int(this->m, "m", problem_space, problem)) {
// default value
this->m = 1024;
}
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->k, "k", problem_space, problem)) {
// default value
this->k = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.E, "E", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
this->alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
this->beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
this->elements_per_128b =
128 / library::sizeof_bits(operation_desc.A.element);
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout,
{int(this->m), int(this->k) / int(this->sparse)})
.front();
this->ldb = DeviceAllocation::get_packed_layout(
operation_desc.B.layout, {int(this->k), int(this->n)}).front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->m), int(this->n)}).front();
this->lde =
DeviceAllocation::get_packed_layout(
operation_desc.E.layout,
{int(this->m), int(this->k / this->sparse / this->elements_per_128b)})
.front();
return Status::kSuccess;
}
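// Worked example of the extents above (illustrative; assumes half-precision A and the 2:4
// structured-sparsity ratio, i.e. sparse == 2): for m = n = k = 1024,
//   elements_per_128b = 128 / 16 = 8
//   A is m x (k / 2)        -> 1024 x 512   (compressed operand)
//   E is m x (k / 2 / 8)    -> 1024 x 64    (metadata)
// so the lda/ldb/ldc/lde computed above are the leading dimensions of the packed tensors.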
/// Initializes a performance result
void SparseGemmOperationProfiler::SparseGemmProblem::initialize_result(
PerformanceResult &result,
library::SparseGemmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "B", problem_space,
std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
set_argument(result, "E", problem_space,
std::string(library::to_string(operation_desc.E.element)) + ":" + library::to_string(operation_desc.E.layout));
set_argument(result, "m", problem_space, m);
set_argument(result, "n", problem_space, n);
set_argument(result, "k", problem_space, k);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/// Extracts the problem dimensions
Status SparseGemmOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::SparseGemmDescription const &operation_desc =
static_cast<library::SparseGemmDescription const &>(operation->description());
if (operation_desc.gemm_kind != library::GemmKind::kSparse) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
gemm_workspace_.configuration.problem_size.m() = int(problem_.m);
gemm_workspace_.configuration.problem_size.n() = int(problem_.n);
gemm_workspace_.configuration.problem_size.k() = int(problem_.k);
gemm_workspace_.configuration.lda = problem_.lda;
gemm_workspace_.configuration.ldb = problem_.ldb;
gemm_workspace_.configuration.ldc = problem_.ldc;
gemm_workspace_.configuration.ldd = problem_.ldc;
gemm_workspace_.configuration.lde = problem_.lde;
gemm_workspace_.arguments.A = nullptr;
gemm_workspace_.arguments.B = nullptr;
gemm_workspace_.arguments.C = nullptr;
gemm_workspace_.arguments.D = nullptr;
gemm_workspace_.arguments.E = nullptr;
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments);
}
/// Initializes the performance result
void SparseGemmOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::SparseGemmDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
// Input bytes read and Output bytes written for the gemm problem
result.bytes =
int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) *
problem_.k / problem_.sparse +
int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.n / 8) *
problem_.k +
int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) *
problem_.n +
int64_t(library::sizeof_bits(operation_desc.E.element) * problem_.m / 8) *
problem_.k / problem_.sparse / problem_.elements_per_128b;
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(problem_.beta.begin(), problem_.beta.end(), [](uint8_t i) { return i==0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
result.bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n;
}
result.flops = 2 * (problem_.m * problem_.n * problem_.k + problem_.m * problem_.n);
result.runtime = 0;
}
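// Reading of the performance model above (illustrative): with beta == 0,
//   bytes = |A_compressed| + |B| + |D| + |E|
// where A and E use the compressed extent k / sparse (E further divided by
// elements_per_128b), and a nonzero beta adds one extra read of C. The FLOP count
//   flops = 2 * (m * n * k + m * n)
// charges one multiply-add per accumulated element plus the epilogue update.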
/// Initializes workspace
Status SparseGemmOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::SparseGemmDescription const &operation_desc =
static_cast<library::SparseGemmDescription const &>(operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
int seed_shift = 0;
gemm_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.m), int(problem_.k) / int(problem_.sparse)},
{int(problem_.lda)},
1, // batch_count
seed_shift++
);
gemm_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
{int(problem_.k), int(problem_.n)},
{int(problem_.ldb)},
1, // batch_count
seed_shift++
);
gemm_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)},
1, // batch_count
seed_shift++
);
gemm_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)}
);
gemm_workspace_.E = device_context.allocate_sparsemeta_tensor(
options,
"E",
operation_desc.E.element,
operation_desc.E.layout,
operation_desc.A.element,
{int(problem_.m), int(problem_.k) / int(problem_.sparse) / int(problem_.elements_per_128b)},
{int(problem_.lde)},
1, // batch_count
seed_shift++
);
gemm_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.m), int(problem_.n)},
{int(problem_.ldc)}
);
gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(&gemm_workspace_.configuration);
gemm_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(&gemm_workspace_.configuration);
gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = operation->initialize(
&gemm_workspace_.configuration,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kSparseGemm;
results_.back().disposition = Disposition::kNotRun;
for(auto &verification_provider : options.verification.providers) {
results_.back().verification_map[verification_provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool SparseGemmOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.E = gemm_workspace_.E->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&gemm_workspace_.arguments,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Returning true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool SparseGemmOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.E = gemm_workspace_.E->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&gemm_workspace_.arguments,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| tools/profiler/src/sparse_gemm_operation_profiler.cu/0 | {
"file_path": "tools/profiler/src/sparse_gemm_operation_profiler.cu",
"repo_id": "tools",
"token_count": 7352
} | 60 |
/******************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/util/device_utils.h"
#include <float.h>
namespace cutlass {
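// Row-wise RMSNorm for half-precision data vectorized as float4 loads (eight half values per
// load): each threadblock handles one of the m rows, first accumulating the sum of squares over
// its n elements, then scaling every element by rsqrtf(sum / n + epsilon) and the per-column
// weight.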
__global__ void rmsnorm_twoPassAlgo_e8(float4 *output, const float4 *input,
const float4 *weight,
const int m, const int n, float epsilon) {
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean;
float local_sums[1] = {0.0f};
const int n_8 = n / 8;
int offset = m_idx * n_8;
input += offset;
output += offset;
for (int index = tid; index < n_8; index += bdimx) {
const float4 local_val = input[index];
const half2 *h1 = (half2 *)&local_val.x;
const half2 *h2 = (half2 *)&local_val.y;
const half2 *h3 = (half2 *)&local_val.z;
const half2 *h4 = (half2 *)&local_val.w;
local_sums[0] += static_cast<float>(h1->x) * static_cast<float>(h1->x) +
static_cast<float>(h1->y) * static_cast<float>(h1->y) +
static_cast<float>(h2->x) * static_cast<float>(h2->x) +
static_cast<float>(h2->y) * static_cast<float>(h2->y) +
static_cast<float>(h3->x) * static_cast<float>(h3->x) +
static_cast<float>(h3->y) * static_cast<float>(h3->y) +
static_cast<float>(h4->x) * static_cast<float>(h4->x) +
static_cast<float>(h4->y) * static_cast<float>(h4->y);
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
} else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = rsqrtf(local_sums[0] / n + epsilon);
}
__syncthreads();
for (int index = tid; index < n_8; index += bdimx) {
const float4 local_val = input[index];
const float4 weight_val = weight[index];
const half2 *l1 = (half2 *)&local_val.x;
const half2 *l2 = (half2 *)&local_val.y;
const half2 *l3 = (half2 *)&local_val.z;
const half2 *l4 = (half2 *)&local_val.w;
const half2 *g1 = (half2 *)&weight_val.x;
const half2 *g2 = (half2 *)&weight_val.y;
const half2 *g3 = (half2 *)&weight_val.z;
const half2 *g4 = (half2 *)&weight_val.w;
float4 tmp;
half2 *h1 = (half2 *)&tmp.x;
half2 *h2 = (half2 *)&tmp.y;
half2 *h3 = (half2 *)&tmp.z;
half2 *h4 = (half2 *)&tmp.w;
h1->x = half(static_cast<float>(l1->x) * s_mean * static_cast<float>(g1->x));
h1->y = half(static_cast<float>(l1->y) * s_mean * static_cast<float>(g1->y));
h2->x = half(static_cast<float>(l2->x) * s_mean * static_cast<float>(g2->x));
h2->y = half(static_cast<float>(l2->y) * s_mean * static_cast<float>(g2->y));
h3->x = half(static_cast<float>(l3->x) * s_mean * static_cast<float>(g3->x));
h3->y = half(static_cast<float>(l3->y) * s_mean * static_cast<float>(g3->y));
h4->x = half(static_cast<float>(l4->x) * s_mean * static_cast<float>(g4->x));
h4->y = half(static_cast<float>(l4->y) * s_mean * static_cast<float>(g4->y));
output[index] = tmp;
}
}
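// Element-wise fallback used when the vectorized kernel above does not apply (n not a multiple of
// 8, or a non-half element type): one threadblock per row, each thread striding over the n
// columns, performing the same two passes (sum of squares, then scale).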
template<typename T>
__global__ void rmsnorm_twoPassAlgo_e1(T* output,
const T* input,
const T* weight,
const int m, const int n,
float epsilon)
{
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean;
float local_sums[1] = {0.0f};
int offset = m_idx * n;
input += offset;
output += offset;
for (int index = tid ; index < n ; index += bdimx){
float local_val = static_cast<float>(input[index]);
local_sums[0] += local_val * local_val;
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = rsqrtf(local_sums[0] / n + epsilon);
}
__syncthreads();
for (int index = tid ; index < n ; index += bdimx){
const T weight_val = weight[index];
const T local_val = input[index];
output[index] = T(static_cast<float>(local_val) * s_mean * static_cast<float>(weight_val));
}
}
template <typename T>
void rmsnorm(cutlass::MatrixCoord tensor_size,
TensorRef<T, layout::RowMajor> ref_output,
TensorRef<T, layout::RowMajor> ref_input,
TensorRef<T, layout::RowMajor> ref_weight,
cudaStream_t stream, float epsilon = 1e-5f){
const int m = tensor_size.row();
const int n = tensor_size.column();
T* output = ref_output.data();
const T* input = ref_input.data();
const T* weight = ref_weight.data();
dim3 grid(m);
if (n % 8 == 0 && std::is_same<T, cutlass::half_t>::value) {
dim3 block(min(1024, (n / 8 + 31) / 32 * 32));
rmsnorm_twoPassAlgo_e8<<<grid, block, 0, stream>>>(
(float4 *)output, (const float4 *)input, (const float4 *)weight, m, n, epsilon);
} else {
dim3 block(min(1024, ((n + 31)/32 + 31)/32*32));
rmsnorm_twoPassAlgo_e1<<<grid, block, 0, stream>>>(
output, input, weight, m, n, epsilon);
}
auto result = cudaGetLastError();
if (result != cudaSuccess) {
std::cerr << "CUDA error: " << cudaGetErrorString(result) << std::endl;
abort();
}
}
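// Illustrative usage sketch (not part of the library API; the tensor names below are hypothetical
// and the shapes are assumptions for the example): normalize an m x n half-precision activation
// tensor with a 1 x n weight vector.
//
//   #include "cutlass/util/host_tensor.h"
//   #include "cutlass/util/device_rmsnorm.h"
//
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> input({m, n});
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> weight({1, n});
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> output({m, n});
//   // ... fill input and weight on the host ...
//   input.sync_device();
//   weight.sync_device();
//   cutlass::rmsnorm({m, n}, output.device_ref(),
//                    input.device_ref(), weight.device_ref(),
//                    /*stream=*/0, /*epsilon=*/1e-5f);
//   output.sync_host();
//
// When n is a multiple of 8 the vectorized rmsnorm_twoPassAlgo_e8 kernel is dispatched;
// otherwise the generic rmsnorm_twoPassAlgo_e1 kernel is used.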
} // namespace cutlass
| tools/util/include/cutlass/util/device_rmsnorm.h/0 | {
"file_path": "tools/util/include/cutlass/util/device_rmsnorm.h",
"repo_id": "tools",
"token_count": 3049
} | 61 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for GEMM in device-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/util/reference/device/kernel/gemm.h"
namespace cutlass {
namespace reference {
namespace device {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename AccumulatorType,
typename InnerProductOp = multiply_add<AccumulatorType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
AccumulatorType initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
// Blocking structure potentially improves performance of reference implementation
// with a minor increase in complexity.
//
// Note, this reference implementation is NOT expected to approach peak performance.
using OutputTile = MatrixShape<4, 4>;
dim3 block(16, 8);
dim3 grid(
(problem_size.m() + block.x * OutputTile::kRow - 1) / (block.x * OutputTile::kRow),
(problem_size.n() + block.y * OutputTile::kColumn - 1) / (block.y * OutputTile::kColumn)
);
// Launch a GEMM kernel
kernel::Gemm<
TensorRef<ElementA, LayoutA>,
TensorRef<ElementB, LayoutB>,
TensorRef<ElementC, LayoutC>,
ScalarType,
AccumulatorType,
OutputTile,
InnerProductOp,
ConvertOp
><<< grid, block >>>(
problem_size,
alpha,
tensor_a,
tensor_b,
beta,
tensor_c,
tensor_d,
initial_accum
);
}
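// Illustrative usage sketch (hypothetical tensor names; the element types, layouts and
// scalar/accumulator choices below are assumptions for the example):
//
//   cutlass::reference::device::compute_gemm<
//       float, cutlass::layout::ColumnMajor,     // A
//       float, cutlass::layout::ColumnMajor,     // B
//       float, cutlass::layout::ColumnMajor,     // C and D
//       float, float>(                           // ScalarType, AccumulatorType
//     {M, N, K}, alpha,
//     tensor_a.device_ref(), tensor_b.device_ref(),
//     beta, tensor_c.device_ref(), tensor_d.device_ref(),
//     float(0));                                 // initial_accum
//
// The launch is asynchronous; synchronize (e.g. cudaDeviceSynchronize()) before reading tensor_d
// back on the host.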
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// This assumes the accumulator type is the same type as the scalars.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename AccumulatorType,
typename InnerProductOp = multiply_add<AccumulatorType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
AccumulatorType initial_accum) {
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, InnerProductOp, ConvertOp>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c,
initial_accum);
}
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename AccumulatorType,
typename InnerProductOp = cutlass::arch::OpMultiplyAdd
>
struct Gemm;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename AccumulatorType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, arch::OpMultiplyAdd> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, multiply_add<AccumulatorType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, multiply_add<AccumulatorType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
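// Illustrative usage sketch of the functor form above (hypothetical tensor names; the
// mixed-precision type choices are assumptions for the example):
//
//   cutlass::reference::device::Gemm<
//       cutlass::half_t, cutlass::layout::ColumnMajor,   // A
//       cutlass::half_t, cutlass::layout::RowMajor,      // B
//       float, cutlass::layout::RowMajor,                // C and D
//       float, float> reference_gemm;                    // ScalarType, AccumulatorType
//
//   reference_gemm(problem_size, alpha,
//                  tensor_a.device_ref(), tensor_b.device_ref(),
//                  beta, tensor_c.device_ref(), tensor_d.device_ref());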
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add-saturate
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename AccumulatorType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
AccumulatorType, arch::OpMultiplyAddSaturate> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, multiply_add<AccumulatorType>,
NumericConverterClamp<ElementC, ScalarType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, multiply_add<AccumulatorType>,
NumericConverterClamp<ElementC, ScalarType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for XOR-popc
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename AccumulatorType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
AccumulatorType, arch::OpXorPopc> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, xor_add<AccumulatorType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, xor_add<AccumulatorType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Batched GEMM
//
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a batch of GEMMs over a set of matrices of common dimension.
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType,
typename InnerProductOp,
typename ConvertOp
>
void BatchedGemm(
gemm::GemmCoord problem_size,
int batch_count,
ScalarType alpha,
TensorRefCollectionA const& tensor_a,
TensorRefCollectionB const& tensor_b,
ScalarType beta,
TensorRefCollectionC &tensor_c,
AccumulatorType initial_accum) {
static_assert(
TensorRefCollectionA::kRank == 2 &&
TensorRefCollectionB::kRank == 2 &&
TensorRefCollectionC::kRank == 2, "Tensors must be of rank 2");
// Blocking structure potentially improves performance of reference implementation
// with a minor increase in complexity.
//
// Note, this reference implementation is NOT expected to approach peak performance.
using OutputTile = MatrixShape<4, 4>;
dim3 block(16, 8);
dim3 grid(
(problem_size.m() + block.x * OutputTile::kRow - 1) / (block.x * OutputTile::kRow),
(problem_size.n() + block.y * OutputTile::kColumn - 1) / (block.y * OutputTile::kColumn),
batch_count
);
// Launch a GEMM kernel
kernel::BatchedGemm<
TensorRefCollectionA,
TensorRefCollectionB,
TensorRefCollectionC,
ScalarType,
AccumulatorType,
OutputTile,
InnerProductOp,
ConvertOp
><<< grid, block >>>(
problem_size,
alpha,
tensor_a,
tensor_b,
beta,
tensor_c,
initial_accum
);
}
/// Computes a batch of GEMMs over a set of matrices of common dimension, assuming the
/// accumulator type is the same type as the scalars.
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType
>
void BatchedGemm(
gemm::GemmCoord problem_size,
int batch_count,
ScalarType alpha,
TensorRefCollectionA const& tensor_a,
TensorRefCollectionB const& tensor_b,
ScalarType beta,
TensorRefCollectionC &tensor_c) {
  BatchedGemm(problem_size, batch_count, alpha, tensor_a, tensor_b, beta, tensor_c,
              AccumulatorType(0));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
| tools/util/include/cutlass/util/reference/device/gemm.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/device/gemm.h",
"repo_id": "tools",
"token_count": 5124
} | 62 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <stdexcept>
#include "cutlass/cutlass.h"
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines several helpers
namespace detail {
/// Helper to perform for-each operation
template <typename Func, int Rank, int RankRemaining>
struct TensorForEachHelper {
/// Index of the active rank
static int const kActiveRank = Rank - RankRemaining - 1;
/// Constructor for general rank
TensorForEachHelper(
Func &func,
Coord<Rank> const &extent,
Coord<Rank> &coord) {
for (int i = 0; i < extent.at(kActiveRank); ++i) {
coord[kActiveRank] = i;
TensorForEachHelper<Func, Rank, RankRemaining - 1>(func, extent, coord);
}
}
};
/// Helper to perform for-each operation
template <typename Func, int Rank>
struct TensorForEachHelper<Func, Rank, 0> {
/// Index of the active rank
static int const kActiveRank = Rank - 1;
/// Constructor for fastest changing rank
TensorForEachHelper(
Func &func,
Coord<Rank> const &extent,
Coord<Rank> &coord) {
for (int i = 0; i < extent.at(kActiveRank); ++i) {
coord[kActiveRank] = i;
func(coord);
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Iterates over the index space of a tensor
template <
typename Func, ///< function applied to each point in a tensor's index space
int Rank> ///< rank of index space
void TensorForEach(Coord<Rank> extent, Func & func) {
Coord<Rank> coord;
detail::TensorForEachHelper<Func, Rank, Rank - 1>(func, extent, coord);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Iterates over the index space of a tensor and calls a C++ lambda
template <
typename Func, ///< function applied to each point in a tensor's index space
int Rank> ///< rank of index space
void TensorForEachLambda(Coord<Rank> extent, Func func) {
Coord<Rank> coord;
detail::TensorForEachHelper<Func, Rank, Rank - 1>(func, extent, coord);
}
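// Illustrative usage sketch: visit every coordinate of a rank-2 extent with a lambda. The
// host-side tensor `view` is a hypothetical name used only for this example.
//
//   cutlass::Coord<2> extent = cutlass::make_Coord(rows, columns);
//   cutlass::reference::host::TensorForEachLambda(extent,
//     [&](cutlass::Coord<2> const &coord) {
//       view.at(coord) = float(coord[0] + coord[1]);   // e.g. fill element (i, j) with i + j
//     });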
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Func>
struct BlockForEach {
/// Constructor performs the operation.
BlockForEach(
Element *ptr,
size_t capacity,
typename Func::Params params = typename Func::Params()) {
Func func(params);
for (size_t index = 0; index < capacity; ++index) {
ptr[index] = func();
}
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| tools/util/include/cutlass/util/reference/host/tensor_foreach.h/0 | {
"file_path": "tools/util/include/cutlass/util/reference/host/tensor_foreach.h",
"repo_id": "tools",
"token_count": 1327
} | 63 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to call a CUTLASS GEMM kernel and provides a naive reference
matrix multiply kernel to verify its correctness.
 The CUTLASS Gemm template is instantiated in the function CutlassSgemmNN. This kernel computes
 the general matrix product (GEMM) using single-precision floating-point arithmetic and assumes
 all matrices have column-major layout.
The threadblock tile size is chosen as 128x128x8 which offers good performance for large matrices.
See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available
in CUTLASS.
https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/
Aside from defining and launching the SGEMM kernel, this example does not use any other components
or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are
prevalent in the CUTLASS unit tests.
 This example has deliberately been kept similar to the basic_gemm example from cutlass-1.3 to
 highlight the minimal set of differences needed to transition to cutlass-2.0.
Cutlass-1.3 sgemm: https://github.com/NVIDIA/cutlass/blob/master/examples/00_basic_gemm/basic_gemm.cu
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// Helper methods to check for errors
#include "helper.h"
//
// CUTLASS includes needed for single-precision GEMM kernel
//
// Defines cutlass::gemm::device::Gemm, the generic Gemm computation template class.
#include "cutlass/gemm/device/gemm.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS GEMM kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS GEMM template and launch a GEMM kernel.
cudaError_t CutlassSgemmNN(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc) {
// Define type definition for single-precision CUTLASS GEMM with column-major
// input matrices and 128x128x8 threadblock tile size (chosen by default).
//
// To keep the interface manageable, several helpers are defined for plausible compositions
// including the following example for single-precision GEMM. Typical values are used as
// default template arguments. See `cutlass/gemm/device/default_gemm_configuration.h` for more details.
//
// To view the full gemm device API interface, see `cutlass/gemm/device/gemm.h`
using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassGemm = cutlass::gemm::device::Gemm<float, // Data-type of A matrix
ColumnMajor, // Layout of A matrix
float, // Data-type of B matrix
ColumnMajor, // Layout of B matrix
float, // Data-type of C matrix
ColumnMajor>; // Layout of C matrix
// Define a CUTLASS GEMM type
CutlassGemm gemm_operator;
// Construct the CUTLASS GEMM arguments object.
//
// One of CUTLASS's design patterns is to define gemm argument objects that are constructible
// in host code and passed to kernels by value. These may include pointers, strides, scalars,
// and other arguments needed by Gemm and its components.
//
// The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible
// arguments to kernels and (2.) minimized initialization overhead on kernel entry.
//
  CutlassGemm::Arguments args({M, N, K},  // Gemm Problem dimensions
{A, lda}, // Tensor-ref for source matrix A
{B, ldb}, // Tensor-ref for source matrix B
{C, ldc}, // Tensor-ref for source matrix C
{C, ldc}, // Tensor-ref for destination matrix D (may be different memory than source C matrix)
{alpha, beta}); // Scalars used in the Epilogue
//
// Launch the CUTLASS GEMM kernel.
//
cutlass::Status status = gemm_operator(args);
//
// Return a cudaError_t if the CUTLASS GEMM operator returned an error code.
//
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
// Return success, if no errors were encountered.
return cudaSuccess;
}
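//
// A minimal alternative sketch (not used by this example): when the same GEMM is launched many
// times, the operator can be staged explicitly instead of the single-call form above. The names
// follow the function above; the staged initialize()/run() interface is assumed from
// cutlass::gemm::device::Gemm.
//
//   cutlass::Status status = gemm_operator.can_implement(args);
//   if (status == cutlass::Status::kSuccess) {
//     status = gemm_operator.initialize(args);
//     for (int iter = 0; status == cutlass::Status::kSuccess && iter < 10; ++iter) {
//       status = gemm_operator();   // re-launches with the cached arguments
//     }
//   }
//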
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// The source code after this point in the file is generic CUDA using the CUDA Runtime API
// and simple CUDA kernels to initialize matrices and compute the general matrix product.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize a matrix with small integers.
__global__ void InitializeMatrix_kernel(
float *matrix,
int rows,
int columns,
int seed = 0) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < columns) {
int offset = i + j * rows;
// Generate arbitrary elements.
int const k = 16807;
int const m = 16;
float value = float(((offset + seed) * k % m) - m / 2);
matrix[offset] = value;
}
}
/// Simple function to initialize a matrix to arbitrary small integers.
cudaError_t InitializeMatrix(float *matrix, int rows, int columns, int seed = 0) {
dim3 block(16, 16);
dim3 grid(
(rows + block.x - 1) / block.x,
(columns + block.y - 1) / block.y
);
InitializeMatrix_kernel<<< grid, block >>>(matrix, rows, columns, seed);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates device memory for a matrix then fills with arbitrary small integers.
cudaError_t AllocateMatrix(float **matrix, int rows, int columns, int seed = 0) {
cudaError_t result;
size_t sizeof_matrix = sizeof(float) * rows * columns;
// Allocate device memory.
result = cudaMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to allocate matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = cudaMemset(*matrix, 0, sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Initialize matrix elements to arbitrary small integers.
result = InitializeMatrix(*matrix, rows, columns, seed);
if (result != cudaSuccess) {
std::cerr << "Failed to initialize matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Naive reference GEMM computation.
__global__ void ReferenceGemm_kernel(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < M && j < N) {
float accumulator = 0;
for (int k = 0; k < K; ++k) {
accumulator += A[i + k * lda] * B[k + j * ldb];
}
C[i + j * ldc] = alpha * accumulator + beta * C[i + j * ldc];
}
}
/// Reference GEMM computation.
cudaError_t ReferenceGemm(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc) {
dim3 block(16, 16);
dim3 grid(
(M + block.x - 1) / block.x,
(N + block.y - 1) / block.y
);
ReferenceGemm_kernel<<< grid, block >>>(M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a single-precision
/// CUTLASS GEMM kernel.
cudaError_t TestCutlassGemm(int M, int N, int K, float alpha, float beta) {
cudaError_t result;
//
// Define several matrices to be used as operands to GEMM kernels.
//
// Compute leading dimensions for each matrix.
int lda = M;
int ldb = K;
int ldc = M;
// Compute size in bytes of the C matrix.
size_t sizeof_C = sizeof(float) * ldc * N;
// Define pointers to matrices in GPU device memory.
float *A;
float *B;
float *C_cutlass;
float *C_reference;
//
// Allocate matrices in GPU device memory with arbitrary seeds.
//
result = AllocateMatrix(&A, M, K, 0);
if (result != cudaSuccess) {
return result;
}
result = AllocateMatrix(&B, K, N, 17);
if (result != cudaSuccess) {
cudaFree(A);
return result;
}
result = AllocateMatrix(&C_cutlass, M, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(B);
return result;
}
result = AllocateMatrix(&C_reference, M, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(B);
cudaFree(C_cutlass);
return result;
}
result = cudaMemcpy(C_reference, C_cutlass, sizeof_C, cudaMemcpyDeviceToDevice);
if (result != cudaSuccess) {
std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Launch CUTLASS GEMM.
//
result = CutlassSgemmNN(M, N, K, alpha, A, lda, B, ldb, beta, C_cutlass, ldc);
if (result != cudaSuccess) {
std::cerr << "CUTLASS GEMM kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Verify.
//
// Launch reference GEMM
result = ReferenceGemm(M, N, K, alpha, A, lda, B, ldb, beta, C_reference, ldc);
if (result != cudaSuccess) {
std::cerr << "Reference GEMM kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
// Copy to host and verify equivalence.
std::vector<float> host_cutlass(ldc * N, 0);
std::vector<float> host_reference(ldc * N, 0);
result = cudaMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy CUTLASS GEMM results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
result = cudaMemcpy(host_reference.data(), C_reference, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy Reference GEMM results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Free device memory allocations.
//
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
//
// Test for bit equivalence of results.
//
if (host_cutlass != host_reference) {
std::cerr << "CUTLASS results incorrect." << std::endl;
return cudaErrorUnknown;
}
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to basic_gemm example.
//
// usage:
//
// 00_basic_gemm <M> <N> <K> <alpha> <beta>
//
int main(int argc, const char *arg[]) {
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions.
int problem[3] = { 128, 128, 128 };
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Scalars used for linear scaling the result of the matrix product.
float scalars[2] = { 1, 0 };
for (int i = 4; i < argc && i < 6; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 4];
}
//
// Run the CUTLASS GEMM test.
//
cudaError_t result = TestCutlassGemm(
problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| examples/00_basic_gemm/basic_gemm.cu/0 | {
"file_path": "examples/00_basic_gemm/basic_gemm.cu",
"repo_id": "examples",
"token_count": 5053
} | 0 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to use split-k version of matrix multiplication using functions and data
structures provided by CUTLASS; which we run on a NVIDIA Volta GPU.
What is split-k?
 Consider a problem size of M = 128, N = 128, K = 4096. In this case, if the thread-block tile size
 (a tile can be viewed as a 2d matrix) is 128x128x4096, then we launch a single thread-block that
 occupies only one of the 84 SMs present on V100, so the efficiency of the computation is very low.
 How do we solve this? This is where split-k comes in. It partitions the K-dimension of the matrix
 multiplication and distributes the work across multiple SMs, achieving better efficiency than a
 single SM. In the above example, we can partition the K-dimension with a split-k factor of 16,
 i.e., the thread-block tile size becomes 128x128x256 and the work is launched on 16 SMs. Once each
 thread-block has computed its partial inner product (1/16th of the output), the partial results
 are accumulated into a single output matrix.
 Writing a single high performance matrix multiplication kernel is hard but doable, whereas writing
 high performance kernels at scale that work for multiple problem sizes with good abstractions is
 really hard. CUTLASS solves this problem by providing simplified abstractions for composing the
 different sections of a gemm kernel. When used properly, the kernels can easily approach the peak
 performance of the GPU.
 CUTLASS divides a kernel into hierarchical, composable sections. This means that the thread, warp
 and thread-block levels each compute on their own tile size, with higher-level tiles composed from
 lower-level ones. Multiple thread-tiles (the tile size each thread computes) form a warp-tile (the
 tile size each warp computes), and multiple warp-tiles form a threadblock-tile (the tile size
 computed by a threadblock).
 In this example, we split variable initialization into two parts:
 1. Setting up data properties: describes how matrices are laid out in memory and how the kernel
 can view them (logical to physical mapping)
 2. Setting up computation properties: describes how the matrices set up above will be used to
 compute the output of the matrix multiplication.
 First, we set up the data types of matrices A, B, C and D along with alpha and beta, as the
 equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and
 leave the rest of the computation to the end of the kernel, as alpha * X + beta * C is a simple
 element-wise operation on X (= A * B) and C. We call this the epilogue of the kernel. Hence, we set
 the data types for alpha and beta to ElementComputeEpilogue = float. As we want to use MMA
 instructions on Volta, and they support only half-precision floating point (fp16 or half) inputs,
 we use cutlass::half_t as the data type for the elements of the input matrices A and B. Volta also
 supports accumulation of partial dot products into fp32, which can store a wider range of numbers,
 so we use it as the data type of the output matrix elements and of the accumulation. We convey this
 to the CUTLASS kernel by initializing the template variables ElementAccumulator (float),
 ElementComputeEpilogue (float), ElementInputA (cutlass::half_t), ElementInputB (cutlass::half_t)
 and ElementOutput (float). Communicating just the data types is not enough. As the data is laid out
 linearly in memory, we also have to convey the layouts of the matrices. We do that by initializing
 the template variable LayoutInputA to column major, LayoutInputB to row major and LayoutOutput to
 row major. Next, we set up the rules to compute alpha * X + beta * C, which is called the epilogue
 of the kernel. We initialize the template variable EpilogueOp, which takes the data type of the
 output ElementOutput (float), the number of elements per vectorized memory access (4 for float
 output), the data type of the accumulator (float) and the data type of the linear combination
 computation (alpha * X + beta * C).
 Now that we have set up the properties of the data, we have to set up the properties of the
 computation.
 Second, we create template variables for the tile sizes of the thread-block, warp and mma-op:
 128x128x32, 64x64x32 and 8x8x4 (MxNxK), respectively. When these are used to instantiate the
 CUTLASS GEMM kernel, it internally deduces the number of threads needed per thread-block, the
 amount of shared memory, how to store data in a bank-conflict-free manner, and a ton of other
 variables required to compose, initialize and launch a high performance GEMM kernel. This is the
 beauty of CUTLASS: it relieves the developer from understanding and coding complicated hardware
 optimizations which can easily go wrong.
 A few more template variables are initialized, such as the mapping that decides which threadblock,
 launched on an SM, computes which tile of the output matrix, and the CUDA SM architecture of the
 GPU you want to run on.
 These are all put together to create a template variable which describes the CUTLASS GEMM kernel,
 using the cutlass::gemm::device::GemmSplitKParallel template.
 The next step is to initialize physical data, instantiate and initialize the CUTLASS kernel and
 run it.
 We use CUTLASS utilities to initialize, fill and compare matrices as they are simple and don't get
 in the way of learning CUTLASS.
 Once all the matrices are initialized and filled with data, we create an arguments tuple to launch
 the CUTLASS kernel, which takes the problem size (M = 5120, N = 4096 and K = 4096), the matrices,
 alpha, beta and, importantly, the split k-dimension factor. Along with that, we query CUTLASS for
 any scratch-space memory required by the kernel we instantiated. If any is needed, we allocate it
 and pass it along with the other arguments to initialize the CUTLASS kernel; then the kernel is
 launched.
 In this example, we later launch a reference gemm kernel (from CUTLASS utilities) to check that the
 output of the CUTLASS kernel matches that of the reference GEMM kernel.
*/
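// A small worked example of the split-k partitioning described above (illustrative numbers): with
// M = N = 128, K = 4096, a 128x128 threadblock tile and split_k_slices = 16, each partition covers
// K / 16 = 256 of the K-dimension, so 16 threadblocks each compute a 128x128 partial product.
// GemmSplitKParallel reduces these partial products with a separate reduction kernel, which is why
// the workspace query below can report a non-zero scratch-space requirement.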
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_splitk_parallel.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Column Major for
// Matrix A, Row Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm70;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128, N = 128, K = 32
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>; // <- warp tile M = 64, N = 64, K = 32
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 4>; // <- MMA Op tile M = 8, N = 8, K = 4
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
    128 / cutlass::sizeof_bits<ElementOutput>::value,     // <- the number of elements per
                                                       // vectorized memory access: 4 for the
                                                       // float output used here (it would be 8
                                                       // for half precision). This also becomes
                                                       // the vector width of math instructions
                                                       // in the epilogue
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Put all the created template variables to create GemmSplitKParallel template variable
using Gemm = cutlass::gemm::device::GemmSplitKParallel<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp>;
int run() {
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (props.major != 7) {
std::cerr << "Volta Tensor Ops must be run on a machine with compute capability of 70, 72, or 75."
<< std::endl;
// Return 0 so tests pass if run on unsupported architectures or CUDA Toolkits.
return 0;
}
//
// Define problem size
//
const int length_m = 5120;
const int length_n = 4096;
const int length_k = 4096;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(0);
// Split K dimension into 16 partitions
int split_k_slices = 16;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Initialize CUTLASS kernel with arguments and workspace pointer
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
// Create instantiation for device reference gemm kernel
cutlass::reference::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue>
gemm_device;
// Launch device reference gemm kernel
gemm_device(problem_size,
alpha,
tensor_a.device_ref(),
tensor_b.device_ref(),
beta,
tensor_c.device_ref(),
tensor_ref_d.device_ref());
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main() {
//
// Volta Tensor Core operations exposed with mma.sync are first available in CUDA 10.1.
//
// CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples.
//
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Volta Tensor Core operations must be compiled with CUDA 10.1 Toolkit or later." << std::endl;
    // Returning zero so this test passes when built with older CUDA Toolkits. Its actions are a no-op.
return 0;
}
else {
return run();
}
}
| examples/06_splitK_gemm/splitk_gemm.cu/0 | {
"file_path": "examples/06_splitK_gemm/splitk_gemm.cu",
"repo_id": "examples",
"token_count": 6186
} | 1 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/device/gemm_complex.h"
#include "cutlass/util/reference/device/tensor_relu.h"
#include "reference/device/tensor_scale_bias.h"
#include "helper.h"
#define CHECK_GT(val1, val2) \
if((val1) <= (val2)) \
std::cerr << __FILE__ << " " << __LINE__ << ": CHECK_GT failed\n";
#define CHECK_TRUE(val) \
if(!(val)) \
std::cerr << __FILE__ << " " << __LINE__ << ": CHECK_TRUE failed\n";
////////////////////////////////////////////////////////////////////////////////
template <typename Gemm0_, typename Gemm1_>
struct B2bNonFusedGemmRun
{
using Gemm0 = Gemm0_;
using Gemm1 = Gemm1_;
using ElementAccumulator = typename Gemm0::ElementAccumulator;
using ElementCompute = typename Gemm0::GemmKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
cutlass::Distribution::Kind init_Bias;
uint64_t seed;
//
// Methods
//
B2bNonFusedGemmRun(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_Bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_), init_Bias(init_Bias_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
cutlass::reference::host::TensorFillRandomUniform(
view, seed, 2, -2, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else if (dist_kind == cutlass::Distribution::AllZeros) {
cutlass::reference::host::TensorFill(view, Element(0));
}
else if (dist_kind == cutlass::Distribution::AllOnes) {
cutlass::reference::host::TensorFill(view, Element(1));
}
else {
std::cerr << "Not implemented\n";
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmCoord problem_size_0,
cutlass::gemm::GemmCoord problem_size_1,
ElementCompute alpha0 = ElementCompute(1),
ElementCompute beta0 = ElementCompute(0),
ElementCompute alpha1 = ElementCompute(1),
ElementCompute beta1 = ElementCompute(0),
bool relu = true,
int warm_ups = 1,
int runs = 100) {
//
// Allocate the GEMM workspace
//
cutlass::HostTensor<
typename Gemm0::ElementA,
typename Gemm0::LayoutA> tensor_A0(problem_size_0.mk());
cutlass::HostTensor<
typename Gemm0::ElementB,
typename Gemm0::LayoutB> tensor_B0(problem_size_0.kn());
cutlass::HostTensor<
typename Gemm0::ElementC,
typename Gemm0::LayoutC> tensor_C0(problem_size_0.mn());
cutlass::HostTensor<
ElementCompute,
typename Gemm0::LayoutC> tensor_Bias0({1, problem_size_0.n()});
cutlass::HostTensor<
typename Gemm0::ElementC,
typename Gemm0::LayoutC> tensor_D0(problem_size_0.mn());
cutlass::HostTensor<
typename Gemm0::ElementC,
typename Gemm0::LayoutC> reference_D0(problem_size_0.mn());
cutlass::HostTensor<
typename Gemm1::ElementB,
typename Gemm1::LayoutB> tensor_B1(problem_size_1.kn());
cutlass::HostTensor<
typename Gemm1::ElementC,
typename Gemm1::LayoutC> tensor_C1(problem_size_1.mn());
cutlass::HostTensor<
ElementCompute,
typename Gemm1::LayoutC> tensor_Bias1({1, problem_size_1.n()});
cutlass::HostTensor<
typename Gemm1::ElementC,
typename Gemm1::LayoutC> tensor_D1(problem_size_1.mn());
cutlass::HostTensor<
typename Gemm1::ElementC,
typename Gemm1::LayoutC> reference_D1(problem_size_1.mn());
CHECK_TRUE(initialize_tensor(tensor_A0.host_view(), init_A, seed + 2019));
CHECK_TRUE(initialize_tensor(tensor_B0.host_view(), init_B, seed + 2018));
CHECK_TRUE(initialize_tensor(tensor_C0.host_view(), init_C, seed + 2017));
CHECK_TRUE(initialize_tensor(tensor_Bias0.host_view(), init_Bias, seed + 2014));
CHECK_TRUE(initialize_tensor(tensor_B1.host_view(), init_B, seed + 2016));
CHECK_TRUE(initialize_tensor(tensor_C1.host_view(), init_C, seed + 2015));
CHECK_TRUE(initialize_tensor(tensor_Bias1.host_view(), init_Bias, seed + 2013));
cutlass::reference::host::TensorFill(
tensor_D0.host_view());
cutlass::reference::host::TensorFill(
tensor_D1.host_view());
cutlass::reference::host::TensorFill(
reference_D0.host_view());
cutlass::reference::host::TensorFill(
reference_D1.host_view());
tensor_A0.sync_device();
tensor_B0.sync_device();
tensor_C0.sync_device();
tensor_Bias0.sync_device();
tensor_D0.sync_device();
tensor_B1.sync_device();
tensor_C1.sync_device();
tensor_Bias1.sync_device();
tensor_D1.sync_device();
reference_D0.sync_device();
reference_D1.sync_device();
//
// Initialize the GEMM operator
//
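    // Note: the bias operand is passed below with a row stride of 0, so the
    // epilogue re-reads the same 1 x N bias vector for every output row,
    // i.e. the bias is broadcast along M.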
typename Gemm0::Arguments arguments_0{
problem_size_0,
tensor_A0.device_ref(),
tensor_B0.device_ref(),
{tensor_Bias0.device_data(), typename Gemm0::LayoutC::Stride(0)},
tensor_D0.device_ref(),
{alpha0, beta0}
};
typename Gemm1::Arguments arguments_1{
problem_size_1,
tensor_D0.device_ref(),
tensor_B1.device_ref(),
{tensor_Bias1.device_data(), typename Gemm1::LayoutC::Stride(0)},
tensor_D1.device_ref(),
{alpha1, beta1}
};
Gemm0 gemm_op_0;
Gemm1 gemm_op_1;
cutlass::Status status = gemm_op_0.initialize(arguments_0);
CUTLASS_CHECK(status);
status = gemm_op_1.initialize(arguments_1);
CUTLASS_CHECK(status);
for(int i = 0; i < warm_ups; i++) {
status = gemm_op_0();
CUTLASS_CHECK(status);
status = gemm_op_1();
CUTLASS_CHECK(status);
}
//
// Run the GEMM
//
cudaEvent_t start, stop1, stop2;
cudaEventCreate(&start);
cudaEventCreate(&stop1);
cudaEventCreate(&stop2);
cudaEventRecord(start);
for(int i = 0; i < runs; i++) {
status = gemm_op_0();
CUTLASS_CHECK(status);
}
cudaEventRecord(stop1);
for(int i = 0; i < runs; i++) {
status = gemm_op_1();
CUTLASS_CHECK(status);
}
cudaEventRecord(stop2);
cudaDeviceSynchronize();
float gemm0Time, gemm1Time, totalTime;
cudaEventElapsedTime(&gemm0Time, start, stop1);
cudaEventElapsedTime(&gemm1Time, stop1, stop2);
cudaEventElapsedTime(&totalTime, start, stop2);
std::cout << "gemm 0 time " << gemm0Time / (float)runs << " ms\n";
std::cout << "gemm 1 time " << gemm1Time / (float)runs << " ms\n";
std::cout << "Non-fusion time " << totalTime / (float)runs << " ms\n";
tensor_D0.sync_host();
tensor_D1.sync_host();
//
// Verify
//
cutlass::reference::device::Gemm<
typename Gemm0::ElementA, typename Gemm0::LayoutA,
typename Gemm0::ElementB, typename Gemm0::LayoutB,
typename Gemm0::ElementC, typename Gemm0::LayoutC, ElementCompute,
ElementAccumulator, typename Gemm0::Operator>
reference_gemm_0;
cutlass::reference::device::Gemm<
typename Gemm1::ElementA, typename Gemm1::LayoutA,
typename Gemm1::ElementB, typename Gemm1::LayoutB,
typename Gemm1::ElementC, typename Gemm1::LayoutC, ElementCompute,
ElementAccumulator, typename Gemm1::Operator>
reference_gemm_1;
reference_gemm_0(
problem_size_0,
alpha0,
tensor_A0.device_ref(),
tensor_B0.device_ref(),
beta0,
{tensor_Bias0.device_data(), typename Gemm0::LayoutC::Stride(0)},
reference_D0.device_ref()
);
if(relu) {
cutlass::reference::device::TensorReLu(reference_D0.device_view());
}
reference_gemm_1(
problem_size_1,
alpha1,
reference_D0.device_ref(),
tensor_B1.device_ref(),
beta1,
{tensor_Bias1.device_data(), typename Gemm1::LayoutC::Stride(0)},
reference_D1.device_ref()
);
if(relu) {
cutlass::reference::device::TensorReLu(reference_D1.device_view());
}
// Wait for kernels to finish
cudaDeviceSynchronize();
reference_D0.sync_host();
reference_D1.sync_host();
CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D0.host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(reference_D0.host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D1.host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(reference_D1.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(
reference_D1.host_view(),
tensor_D1.host_view());
CHECK_TRUE(passed);
if (!passed) {
std::stringstream fname;
fname << "error_B2bGemm_device_nonfused.txt";
std::cerr << "Dumping results in " << fname.str() << "\n";
std::ofstream file(fname.str());
file
<< "A0 =\n" << tensor_A0.host_view()
<< "\nB0 =\n" << tensor_B0.host_view()
<< "\nC0 =\n" << tensor_C0.host_view()
<< "\nBias0:\n" << tensor_Bias0.host_view() << "\n"
<< "\nD0 =\n" << tensor_D0.host_view()
<< "\nB1 =\n" << tensor_B1.host_view()
<< "\nC1 =\n" << tensor_C1.host_view()
<< "\nBias1:\n" << tensor_Bias1.host_view() << "\n"
<< "\n\nReference =\n" << reference_D1.host_view()
<< "\nComputed =\n" << tensor_D1.host_view();
}
return passed;
}
};
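// Usage sketch (illustrative only; not part of the original harness). Gemm0 and
// Gemm1 stand for two fully specified device-level CUTLASS GEMM types; the
// problem sizes and function name are placeholders. The only structural
// requirement is that the second GEMM's K equals the first GEMM's N.
template <typename Gemm0, typename Gemm1>
bool run_nonfused_b2b_sketch() {
  using ElementCompute = typename Gemm0::GemmKernel::Epilogue::OutputOp::ElementCompute;
  B2bNonFusedGemmRun<Gemm0, Gemm1> runner;
  cutlass::gemm::GemmCoord problem_0(128, 64, 576);   // GEMM0: M x N0 x K0
  cutlass::gemm::GemmCoord problem_1(128, 128, 64);   // GEMM1: M x N1 x K1, K1 == N0
  return runner.run(problem_0, problem_1,
                    ElementCompute(1), ElementCompute(0),   // alpha0, beta0
                    ElementCompute(1), ElementCompute(0));  // alpha1, beta1
}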
template <typename B2bGemm_>
struct B2bFusedGemmRun
{
using B2bGemm = B2bGemm_;
using ElementAccumulator = typename B2bGemm::ElementAccumulator;
using ElementCompute = typename B2bGemm::B2bGemmKernel::Epilogue::OutputOp::ElementCompute;
/// Initialization
cutlass::Distribution::Kind init_A;
cutlass::Distribution::Kind init_B;
cutlass::Distribution::Kind init_C;
cutlass::Distribution::Kind init_Scale;
cutlass::Distribution::Kind init_Bias;
uint64_t seed;
//
// Methods
//
B2bFusedGemmRun(
cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_Scale_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_Bias_ = cutlass::Distribution::Uniform,
uint64_t seed_ = 2080
):
init_A(init_A_), init_B(init_B_), init_C(init_C_),
init_Scale(init_Scale_), init_Bias(init_Bias_), seed(seed_) { }
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(
cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
cutlass::reference::host::TensorFillRandomUniform(
view, seed, 2, -2, 0);
}
else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5);
}
else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(
view.data(), view.capacity());
}
else if (dist_kind == cutlass::Distribution::AllZeros) {
cutlass::reference::host::TensorFill(view, Element(0));
}
else if (dist_kind == cutlass::Distribution::AllOnes) {
cutlass::reference::host::TensorFill(view, Element(1));
}
else {
std::cerr << "Not implemented\n";
return false;
}
return true;
}
/// Executes one test
bool run(
cutlass::gemm::GemmCoord problem_size_0,
cutlass::gemm::GemmCoord problem_size_1,
ElementCompute alpha0 = ElementCompute(1),
ElementCompute beta0 = ElementCompute(0),
ElementCompute alpha1 = ElementCompute(1),
ElementCompute beta1 = ElementCompute(0),
cutlass::gemm::GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm,
// batch_count is used as split-k when mode is kGemm according
// to the GemmUniversal interface
int batch_count = 1,
int64_t batch_stride_A0 = 0,
int64_t batch_stride_B0 = 0,
int64_t batch_stride_C0 = 0,
int64_t batch_stride_B1 = 0,
int64_t batch_stride_C1 = 0,
int64_t batch_stride_D1 = 0,
int64_t batch_stride_Bias0 = 0,
int64_t batch_stride_Scale0 = 0,
bool relu = true,
int warm_ups = 1,
int runs = 100) {
//
// Allocate the GEMM workspace
//
cutlass::gemm::GemmCoord CoordA0(problem_size_0.m(), problem_size_0.n(), batch_count * problem_size_0.k());
cutlass::gemm::GemmCoord CoordB0(problem_size_0.m(), problem_size_0.n(), batch_count * problem_size_0.k());
cutlass::gemm::GemmCoord CoordC0(problem_size_0.m(), batch_count * problem_size_0.n(), problem_size_0.k());
cutlass::gemm::GemmCoord CoordB1(problem_size_1.m(), problem_size_1.n(), batch_count * problem_size_1.k());
cutlass::gemm::GemmCoord CoordC1(problem_size_1.m(), batch_count * problem_size_1.n(), problem_size_1.k());
cutlass::HostTensor<
typename B2bGemm::ElementA,
typename B2bGemm::LayoutA> tensor_A0(CoordA0.mk());
cutlass::HostTensor<
typename B2bGemm::ElementB,
typename B2bGemm::LayoutB> tensor_B0(CoordB0.kn());
cutlass::HostTensor<
typename B2bGemm::ElementC,
typename B2bGemm::LayoutC> tensor_C0(CoordC0.mn());
cutlass::HostTensor<
typename B2bGemm::ElementScaleBias,
typename B2bGemm::LayoutScaleBias> tensor_Scale0;
if(alpha0 == ElementCompute(0)) //per-channel scale
tensor_Scale0.resize({1, batch_count * problem_size_0.n()});
cutlass::HostTensor<
typename B2bGemm::ElementScaleBias,
typename B2bGemm::LayoutScaleBias> tensor_Bias0({1, batch_count * problem_size_0.n()});
cutlass::HostTensor<
ElementAccumulator,
typename B2bGemm::LayoutC> reference_Z0(CoordC0.mn());
cutlass::HostTensor<
typename B2bGemm::ElementC,
typename B2bGemm::LayoutC> reference_D0(CoordC0.mn());
cutlass::HostTensor<
typename B2bGemm::ElementB,
typename B2bGemm::LayoutB> tensor_B1(CoordB1.kn());
cutlass::HostTensor<
typename B2bGemm::ElementC,
typename B2bGemm::LayoutC> tensor_C1(CoordC1.mn());
cutlass::HostTensor<
typename B2bGemm::ElementC,
typename B2bGemm::LayoutScaleBias> tensor_Bias1({1, batch_count * problem_size_1.n()});
cutlass::HostTensor<
typename B2bGemm::ElementC,
typename B2bGemm::LayoutC> tensor_D1(CoordC1.mn());
cutlass::HostTensor<
typename B2bGemm::ElementC,
typename B2bGemm::LayoutC> reference_D1(CoordC1.mn());
CHECK_TRUE(initialize_tensor(tensor_A0.host_view(), init_A, seed + 2019));
CHECK_TRUE(initialize_tensor(tensor_B0.host_view(), init_B, seed + 2018));
CHECK_TRUE(initialize_tensor(tensor_C0.host_view(), init_C, seed + 2017));
if(alpha0 == ElementCompute(0)) //per-channel scale
CHECK_TRUE(initialize_tensor(tensor_Scale0.host_view(), init_Scale, seed + 2014));
CHECK_TRUE(initialize_tensor(tensor_Bias0.host_view(), init_Bias, seed + 2013));
CHECK_TRUE(initialize_tensor(tensor_B1.host_view(), init_B, seed + 2016));
CHECK_TRUE(initialize_tensor(tensor_C1.host_view(), init_C, seed + 2015));
CHECK_TRUE(initialize_tensor(tensor_Bias1.host_view(), init_Bias, seed + 2012));
cutlass::reference::host::TensorFill(
tensor_D1.host_view());
cutlass::reference::host::TensorFill(
reference_D0.host_view());
cutlass::reference::host::TensorFill(
reference_D1.host_view());
tensor_A0.sync_device();
tensor_B0.sync_device();
tensor_C0.sync_device();
if(alpha0 == ElementCompute(0)) //per-channel scale
tensor_Scale0.sync_device();
tensor_Bias0.sync_device();
tensor_B1.sync_device();
tensor_C1.sync_device();
tensor_Bias1.sync_device();
tensor_D1.sync_device();
reference_D0.sync_device();
reference_D1.sync_device();
//
// Initialize the GEMM operator
//
typename B2bGemm::Arguments arguments{
mode,
problem_size_0,
problem_size_1,
tensor_A0.device_ref(),
tensor_B0.device_ref(),
tensor_C0.device_ref(),
tensor_Scale0.device_ref(),
tensor_Bias0.device_ref(),
tensor_B1.device_ref(),
{tensor_Bias1.device_data(), typename B2bGemm::LayoutC::Stride(0)},
tensor_D1.device_ref(),
batch_stride_A0,
batch_stride_B0,
batch_stride_B1,
batch_stride_C1,
batch_stride_D1,
batch_stride_Bias0,
batch_stride_Scale0,
{alpha0, beta0},
{alpha1, beta1},
batch_count,
};
B2bGemm b2b_gemm_op;
cutlass::Status status = b2b_gemm_op.can_implement(arguments);
if(status != cutlass::Status::kSuccess) {
std::cout << "Problem sizes not supported.\n"
<< "Requirments:\n"
<< " problem_size_0.M = problem_size_1.M\n"
<< " problem_size_0.N = problem_size_1.K\n"
<< " ThreadblockShape0::kN = problem_size_0.N\n"
<< " ThreadblockShape1::kN = problem_size_1.N" << std::endl;
}
status = b2b_gemm_op.initialize(arguments);
CUTLASS_CHECK(status);
for(int i = 0; i < warm_ups; i++) {
status = b2b_gemm_op();
CUTLASS_CHECK(status);
}
//
// Run the GEMM
//
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for(int i = 0; i < runs; i++) {
status = b2b_gemm_op();
CUTLASS_CHECK(status);
}
cudaEventRecord(stop);
cudaDeviceSynchronize();
float gemmTime;
cudaEventElapsedTime(&gemmTime, start, stop);
std::cout << "Fusion time " << gemmTime / (float)runs << " ms\n";
tensor_D1.sync_host();
//
// Verify
//
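    // The reference path reproduces the fusion in two stages: a batched GEMM into
    // the accumulator-typed tensor Z0, followed by the per-channel scale/bias
    // (and optional ReLU) that the fused kernel applies to its accumulators.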
cutlass::reference::device::GemmComplex<
typename B2bGemm::ElementA, typename B2bGemm::LayoutA,
typename B2bGemm::ElementB, typename B2bGemm::LayoutB,
ElementAccumulator, typename B2bGemm::LayoutC,
ElementAccumulator, ElementAccumulator
>(
problem_size_0,
ElementAccumulator(1), //intermediate alpha=1
tensor_A0.device_ref(),
cutlass::ComplexTransform::kNone,
tensor_B0.device_ref(),
cutlass::ComplexTransform::kNone,
ElementAccumulator(0), //beta = 0
reference_Z0.device_ref(),
reference_Z0.device_ref(),
ElementAccumulator(0),
int(batch_count),
batch_stride_A0,
batch_stride_B0,
batch_stride_C0,
batch_stride_C0
);
cutlass::reference::device::TensorScaleBiasGemmBatched<
ElementAccumulator, typename B2bGemm::ElementC, typename B2bGemm::LayoutC,
ElementCompute, typename B2bGemm::LayoutScaleBias
> (
problem_size_0,
reference_Z0.device_ref(),
reference_D0.device_ref(),
alpha0,
tensor_Scale0.device_ref(),
tensor_Bias0.device_ref(),
int(batch_count),
batch_stride_C0,
batch_stride_C0,
batch_stride_Scale0,
batch_stride_Bias0
);
if(relu) {
cutlass::reference::device::TensorReLu(reference_D0.device_view());
}
cutlass::reference::device::GemmComplex<
typename B2bGemm::ElementA, typename B2bGemm::LayoutA,
typename B2bGemm::ElementB, typename B2bGemm::LayoutB,
typename B2bGemm::ElementC, typename B2bGemm::LayoutC,
ElementCompute, ElementAccumulator
>(
problem_size_1,
      alpha1, // alpha for the second GEMM
reference_D0.device_ref(),
cutlass::ComplexTransform::kNone,
tensor_B1.device_ref(),
cutlass::ComplexTransform::kNone,
      beta1, // beta for the second GEMM (scales the broadcast bias)
{tensor_Bias1.device_data(), typename B2bGemm::LayoutC::Stride(0)},
reference_D1.device_ref(),
ElementAccumulator(0),
int(batch_count),
batch_stride_C0,
batch_stride_B1,
batch_stride_C1,
batch_stride_D1
);
if(relu) {
cutlass::reference::device::TensorReLu(reference_D1.device_view());
}
cudaDeviceSynchronize();
reference_D0.sync_host();
reference_D1.sync_host();
CHECK_GT(cutlass::reference::host::TensorNorm(reference_D0.host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D1.host_view()), 0);
CHECK_GT(cutlass::reference::host::TensorNorm(reference_D1.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(
reference_D1.host_view(),
tensor_D1.host_view());
CHECK_TRUE(passed);
if (!passed)
{
std::stringstream fname;
fname << "error_B2bGemm_device_fused.txt";
std::cerr << "Dumping results in " << fname.str() << "\n";
std::ofstream file(fname.str());
file
<< "A0 =\n" << tensor_A0.host_view()
<< "\nB0 =\n" << tensor_B0.host_view()
<< "\nC0 =\n" << tensor_C0.host_view()
<< "\nScale0:\n" << tensor_Scale0.host_view() << "\n"
<< "\nBias0:\n" << tensor_Bias0.host_view() << "\n"
<< "\nB1 =\n" << tensor_B1.host_view()
<< "\nC1 =\n" << tensor_C1.host_view()
<< "\nBias1:\n" << tensor_Bias1.host_view() << "\n"
<< "\n\nReference =\n" << reference_D1.host_view()
<< "\nComputed =\n" << tensor_D1.host_view();
}
return passed;
}
};
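// Usage sketch (illustrative only; not part of the original harness). B2bGemm
// stands for a fully specified device-level fused B2B GEMM type; the problem
// sizes and function name are placeholders. Defaults exercise the non-batched
// path; pass GemmUniversalMode::kBatched plus batch strides for batched runs.
template <typename B2bGemm>
bool run_fused_b2b_sketch() {
  using ElementCompute =
      typename B2bGemm::B2bGemmKernel::Epilogue::OutputOp::ElementCompute;
  B2bFusedGemmRun<B2bGemm> runner;
  cutlass::gemm::GemmCoord problem_0(128, 64, 576);   // GEMM0: M x N0 x K0
  cutlass::gemm::GemmCoord problem_1(128, 128, 64);   // GEMM1: M x N1 x K1, K1 == N0
  // alpha0 == 0 selects the per-channel scale path inside the harness.
  return runner.run(problem_0, problem_1,
                    ElementCompute(0), ElementCompute(0),
                    ElementCompute(1), ElementCompute(0));
}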
////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/b2b_gemm_run.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/b2b_gemm_run.h",
"repo_id": "examples",
"token_count": 10562
} | 2 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts. Partial
specializations here choose 'device::GemmTransposed' to implement this functionality.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/numeric_types.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_pipelined.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/vector_iterator.h"
#include "cutlass/transform/threadblock/predicated_vector_access_iterator.h"
#include "kernel/b2b_gemm.h"
#include "threadblock/default_b2b_mma.h"
#include "threadblock/default_b2b_mma_smem_accumulator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
    /// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp0,
/// Epilogue output operator
typename EpilogueOutputOp1,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Operation performed by GEMM
typename Operator>
struct DefaultB2bGemm<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape0, ThreadblockShape1,
WarpShape0, WarpShape1, InstructionShape,
EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, Stages,
Operator, true> {
/// Define the threadblock-scoped matrix multiply-accumulate
using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, Stages, Operator, EpilogueOutputOp0, false, true>::ThreadblockB2bMma;
static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK;
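  // For illustration (placeholder values): with ThreadblockShape1 = <64, 128, 32>
  // and WarpShape1 = <32, 128, 32>, kPartitionsK1 = 32 / 32 = 1, i.e. the second
  // GEMM's epilogue does not need to reduce across K-partitioned warps.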
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape1, typename B2bMma::Operator1, kPartitionsK1, EpilogueOutputOp1,
EpilogueOutputOp1::kCount>::Epilogue;
/// Define the kernel-level GEMM operator.
using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Turing Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp0,
/// Epilogue output operator
typename EpilogueOutputOp1,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Operation performed by GEMM
typename Operator
>
struct DefaultB2bGemm<
ElementA, LayoutA, kAlignmentA,
ElementB, LayoutB, kAlignmentB,
ElementC, layout::RowMajor,
ElementAccumulator,
arch::OpClassTensorOp,
arch::Sm75,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
ThreadblockSwizzle,
2,
Operator,
true
> {
/// Define the threadblock-scoped matrix multiply-accumulate
using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassTensorOp,
arch::Sm75,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
2,
Operator,
EpilogueOutputOp0,
false,
true
>::ThreadblockB2bMma;
static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape1,
typename B2bMma::Operator1,
kPartitionsK1,
EpilogueOutputOp1,
EpilogueOutputOp1::kCount
>::Epilogue;
/// Define the kernel-level GEMM operator.
using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle>;
};
/// Partial specialization for Ampere Integer Matrix Multiply Interleaved layout
template <
/// Element type for A matrix operand
typename ElementA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp0,
/// Epilogue output operator
typename EpilogueOutputOp1,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// Number of Interleaved k
int InterleavedK,
/// Operation performed by GEMM
typename Operator>
struct DefaultB2bGemm<
ElementA, layout::ColumnMajorInterleaved<InterleavedK>, kAlignmentA,
ElementB, layout::RowMajorInterleaved<InterleavedK>, kAlignmentB,
ElementC, layout::ColumnMajorInterleaved<InterleavedK>, int32_t,
arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1,
ThreadblockSwizzle, Stages,
Operator, true> {
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementAccumulator = int32_t;
/// Define the threadblock-scoped matrix multiply-accumulate
using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, Stages, Operator, EpilogueOutputOp0,
true, true>::ThreadblockB2bMma;
static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK;
/// Define the epilogue
using Epilogue = typename cutlass::epilogue::threadblock::
DefaultInterleavedEpilogueTensorOp<
ThreadblockShape1, typename B2bMma::Operator1, kPartitionsK1, EpilogueOutputOp1,
64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;
/// Define the kernel-level GEMM operator.
using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle>;
};
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Turing Integer Tensor Core Interleaved layout
template <
/// Element type for A matrix operand
typename ElementA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape0,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape1,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape0,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape1,
    /// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp0,
/// Epilogue output operator
typename EpilogueOutputOp1,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of Interleaved k
int InterleavedK,
/// Operation performed by GEMM
typename Operator>
struct DefaultB2bGemm<ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
kAlignmentA, ElementB,
layout::RowMajorInterleaved<InterleavedK>, kAlignmentB,
ElementC, layout::ColumnMajorInterleaved<InterleavedK>,
int32_t, arch::OpClassTensorOp, arch::Sm75,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1,
ThreadblockSwizzle, 2, Operator, true> {
using LayoutA = layout::ColumnMajorInterleaved<InterleavedK>;
using LayoutB = layout::RowMajorInterleaved<InterleavedK>;
using LayoutC = layout::ColumnMajorInterleaved<InterleavedK>;
using ElementAccumulator = int32_t;
/// Define the threadblock-scoped matrix multiply-accumulate
using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, LayoutC, arch::OpClassTensorOp, arch::Sm75,
ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1,
InstructionShape, 2, Operator, EpilogueOutputOp0, true, true>::ThreadblockB2bMma;
static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK;
/// Define the epilogue for the 2nd Gemm
using Epilogue = typename cutlass::epilogue::threadblock::
DefaultInterleavedEpilogueTensorOp<
ThreadblockShape1, typename B2bMma::Operator1, kPartitionsK1, EpilogueOutputOp1,
64 / sizeof_bits<ElementC>::value, InterleavedK>::Epilogue;
/// Define the kernel-level GEMM operator.
using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle>;
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| examples/13_two_tensor_op_fusion/kernel/default_b2b_gemm_smem_accumulator.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/kernel/default_b2b_gemm_smem_accumulator.h",
"repo_id": "examples",
"token_count": 5060
} | 3 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implements several threadblock-swizzling functions for grouped kernels
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/kernel/grouped_problem_visitor.h"
#include "cutlass/gemm/kernel/gemm_grouped_problem_visitor.h"
#include "kernel/b2b_gemm_grouped_problem_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
struct GroupedThreadblockSwizzleBase {};
/// Helper for determining if a swizzling function is specialized for grouped operation
template <typename ThreadblockSwizzle>
struct IsGroupedSwizzle {
static bool const value = cutlass::platform::is_base_of<GroupedThreadblockSwizzleBase, ThreadblockSwizzle>::value;
};
} // namespace detail
/// Swizzling function for grouped kernels
template <typename ProblemVisitor_>
struct GroupedThreadblockSwizzle : detail::GroupedThreadblockSwizzleBase {
using ProblemVisitor = ProblemVisitor_;
ProblemVisitor problem_visitor;
CUTLASS_HOST_DEVICE
GroupedThreadblockSwizzle(typename ProblemVisitor::Params& params,
typename ProblemVisitor::SharedStorage& shared_storage,
int block_idx) : problem_visitor(params, shared_storage, block_idx) {}
/// Obtains the threadblock offset (in units of threadblock-scoped tiles)
CUTLASS_DEVICE
GemmCoord get_tile_offset(int /*log_tile*/) const {
GemmCoord problem_size = problem_visitor.problem_size();
int32_t threadblock_idx = int32_t(problem_visitor.threadblock_idx());
GemmCoord grid_shape = problem_visitor.grid_shape(problem_size);
return GemmCoord(int(threadblock_idx / grid_shape.n()),
int(threadblock_idx % grid_shape.n()),
0);
}
/// Dummy method to satisfy API for threadblock swizzling functions
CUTLASS_HOST_DEVICE
static int get_log_tile(GemmCoord /*tiled_shape*/) {
return 0;
}
};
template <
typename ThreadblockShape,
typename LayoutC,
cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode_ = cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly,
int PrefetchTileCount = 128,
int ThreadCount = PrefetchTileCount>
struct B2bGemmGroupedThreadblockSwizzle : GroupedThreadblockSwizzle<
cutlass::gemm::kernel::B2bGemmGroupedProblemVisitor<
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount,
platform::is_same<LayoutC, cutlass::layout::ColumnMajor>::value
>
> {
using Base = GroupedThreadblockSwizzle<cutlass::gemm::kernel::B2bGemmGroupedProblemVisitor<
ThreadblockShape,
GroupScheduleMode_,
PrefetchTileCount,
ThreadCount,
platform::is_same<LayoutC, cutlass::layout::ColumnMajor>::value>>;
CUTLASS_HOST_DEVICE
B2bGemmGroupedThreadblockSwizzle(typename Base::ProblemVisitor::Params& params,
typename Base::ProblemVisitor::SharedStorage& shared_storage,
int block_idx) : Base(params, shared_storage, block_idx) {}
};
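// Compile-time sanity-check sketch (illustrative; not part of the original
// header). The threadblock shape and output layout below are placeholders; any
// swizzle derived from GroupedThreadblockSwizzle should satisfy IsGroupedSwizzle.
static_assert(detail::IsGroupedSwizzle<
                  B2bGemmGroupedThreadblockSwizzle<cutlass::gemm::GemmShape<128, 128, 32>,
                                                   cutlass::layout::RowMajor>>::value,
              "B2bGemmGroupedThreadblockSwizzle is expected to be a grouped swizzle");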
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| examples/13_two_tensor_op_fusion/threadblock/grouped_threadblock_swizzle.h/0 | {
"file_path": "examples/13_two_tensor_op_fusion/threadblock/grouped_threadblock_swizzle.h",
"repo_id": "examples",
"token_count": 2141
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example requires NVIDIA Maxwell GPU or beyond.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// CUTLASS Includes
#include "cutlass/cutlass.h"
#include "cutlass/core_io.h"
#include "cutlass/functional.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/warp/mma_simt.h"
#include "cutlass/epilogue/warp/fragment_iterator_simt.h"
#include "cutlass/epilogue/warp/tile_iterator_simt.h"
// CUTLASS Utility Includes
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/gemm_complex.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// Define the overall warp-level problem shape
int const kM = 14;
int const kN = 27;
int const kK = 17;
///////////////////////////////////////////////////////////////////////////////////////////////////
// Define a warp-level GEMM operator.
//
// This template could be part of the CUTLASS Template Library or implemented internally. This
// wraps the matrix multiply operation and epilogue with a GEMM-like interface that can be
// instantiated in device code.
namespace cutlass {
namespace gemm {
namespace warp {
template <
typename Shape,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementScalar
>
class GemmSimt {
public:
using Policy = cutlass::gemm::warp::MmaSimtPolicy<
cutlass::MatrixShape<4, 8>,
cutlass::layout::RowMajorInterleaved<2>,
cutlass::gemm::GemmShape<4, 4, 1>
>;
using MmaWarp = cutlass::gemm::warp::MmaSimt<
cutlass::gemm::GemmShape<16, 32, 8>,
float,
cutlass::layout::RowMajor,
float,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::RowMajor,
Policy
>;
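  // Sanity-check sketch (added for illustration): a 4x8 arrangement of lanes,
  // each owning a 4x4x1 accumulator tile, covers exactly the 16x32 warp-level
  // MN extent selected for MmaWarp above.
  static_assert(MmaWarp::Shape::kM == 4 * 4 && MmaWarp::Shape::kN == 8 * 4,
                "lane grid times per-lane tile should equal the warp-level shape");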
// Number of 'K groups'
int const kKgroups = Shape::kK;
using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorSimt<
typename MmaWarp::Shape,
typename MmaWarp::ThreadMma,
layout::RowMajor, // SMEM layout
typename MmaWarp::Policy
>;
using AccumulatorTileIterator = cutlass::epilogue::warp::TileIteratorSimtCanonical<
typename MmaWarp::Shape,
typename MmaWarp::ThreadMma,
float, // ElementAccumulator
layout::RowMajor, // SMEM layout
typename MmaWarp::Policy
>;
using TensorRefA = typename MmaWarp::IteratorA::TensorRef;
using TensorRefB = typename MmaWarp::IteratorB::TensorRef;
using TensorRefC = typename AccumulatorTileIterator::TensorRef;
public:
CUTLASS_HOST_DEVICE
GemmSimt() { }
CUTLASS_DEVICE
void operator()(
ElementScalar alpha,
TensorRefA ref_A,
TensorRefB ref_B,
ElementScalar beta,
TensorRefC ref_C,
TensorRefC ref_D,
int lane_id) const {
// Instantiate iterators pointing to slices of the A and B matrices in shared memory
typename MmaWarp::IteratorA iter_A(ref_A, {Shape::kM, Shape::kK}, lane_id);
typename MmaWarp::IteratorB iter_B(ref_B, {Shape::kK, Shape::kN}, lane_id);
// Instantiate and clear accumulator tile holding the C matrix
typename MmaWarp::FragmentC accum;
accum.clear();
// Instantiate the warp-level matrix multiply operator
MmaWarp mma_op;
// Instantiate fragments holding the slice of the matrix held by each warp
typename MmaWarp::FragmentA frag_A[2];
typename MmaWarp::FragmentB frag_B[2];
// Load fragments from shared memory
iter_A.load(frag_A[0]);
iter_B.load(frag_B[0]);
++iter_A;
++iter_B;
    // Iterate over the K dimension, double-buffering fragments from shared memory
CUTLASS_PRAGMA_UNROLL
for (int k = 0; k < kKgroups; ++k) {
// Load fragments from shared memory
iter_A.load(frag_A[(k + 1) % 2]);
iter_B.load(frag_B[(k + 1) % 2]);
++iter_A;
++iter_B;
// Compute the matrix multiply
mma_op(accum, frag_A[k % 2], frag_B[k % 2], accum);
}
// Instantiate iterators
FragmentIterator accum_frag_it(accum);
AccumulatorTileIterator source_tile_it(ref_C, {Shape::kM, Shape::kN}, lane_id);
AccumulatorTileIterator dest_tile_it(ref_D, {Shape::kM, Shape::kN}, lane_id);
// Define function objects for linear scaling operation
cutlass::multiplies<typename FragmentIterator::Fragment> mul_source;
cutlass::multiply_add<typename FragmentIterator::Fragment> mul_add_accumulator;
// Iterate over the epilogue components
CUTLASS_PRAGMA_UNROLL
for (int idx = 0; idx < FragmentIterator::kIterations; ++idx) {
// Define storage for slices of the accumulators
typename FragmentIterator::Fragment accum_fragment;
typename FragmentIterator::Fragment source_fragment;
// Select a slice of accumulators from the accumulator tile
accum_frag_it.load(accum_fragment);
++accum_frag_it;
// Load a corresponding slice from Shared memory
source_tile_it.load(source_fragment);
++source_tile_it;
// Compute linear scaling - alpha * AB + beta * C
source_fragment = mul_source(beta, source_fragment);
accum_fragment = mul_add_accumulator(alpha, accum_fragment, source_fragment);
// Store the result to shared memory
dest_tile_it.store(accum_fragment);
++dest_tile_it;
}
}
};
} // namespace warp
} // namespace gemm
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
// Sample kernel demonstrating a collective GEMM operation by a warp on arbitrary matrices held
// in Shared Memory.
__global__ void kernel(
float *D_gmem,
float alpha,
float const *A_gmem,
float const *B_gmem,
float beta,
float const *C_gmem) {
// Define several matrices in shared memory
__shared__ float A[kM][kK];
__shared__ float B[kN][kK];
__shared__ float C[kM][kN];
// Copy data into SMEM
if (threadIdx.x == 0) {
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 0; m < kM; ++m) {
for (int k = 0; k < kK; ++k) {
A[m][k] = A_gmem[m * kK + k];
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 0; n < kN; ++n) {
for (int k = 0; k < kK; ++k) {
B[n][k] = B_gmem[n * kK + k];
}
}
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 0; m < kM; ++m) {
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 0; n < kN; ++n) {
C[m][n] = C_gmem[m * kN + n];
}
}
}
__syncthreads();
//
  // Instantiate a warp-level matrix multiply operator given the overall problem shape,
  // the data type of each operand, and the layout of each operand.
//
using GemmSimt = cutlass::gemm::warp::GemmSimt<
cutlass::gemm::GemmShape<kM, kN, kK>,
float, // Data type of A elements
cutlass::layout::RowMajor, // Layout of A matrix
float, // Data type of B elements
cutlass::layout::ColumnMajor, // Layout of B matrix
float, // Data type of C elements
cutlass::layout::RowMajor, // Layout of C matrix
float // Scalar type of alpha and beta
>;
// Instantiate the GEMM operator
GemmSimt gemm;
// Execute the warp-level GEMM operation
gemm(
alpha,
{&A[0][0], kK},
{&B[0][0], kK},
beta,
{&C[0][0], kN},
{&C[0][0], kN},
threadIdx.x);
__syncthreads();
  // Copy the result from SMEM back to global memory
if (threadIdx.x == 0) {
CUTLASS_PRAGMA_NO_UNROLL
for (int m = 0; m < kM; ++m) {
CUTLASS_PRAGMA_NO_UNROLL
for (int n = 0; n < kN; ++n) {
D_gmem[m * kN + n] = C[m][n];
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, const char *arg[]) {
cutlass::HostTensor<float, cutlass::layout::RowMajor> A({kM, kK});
cutlass::HostTensor<float, cutlass::layout::ColumnMajor> B({kK, kN});
cutlass::HostTensor<float, cutlass::layout::RowMajor> C({kM, kN});
cutlass::HostTensor<float, cutlass::layout::RowMajor> D({kM, kN});
uint64_t seed = 2020;
float max = 8;
float min = -8;
std::cout << "Simt canonical GEMM problem size = (" << cutlass::gemm::GemmShape<kM, kN, kK>() <<")" << std::endl;
cutlass::reference::host::TensorFillRandomUniform(
A.host_view(),
seed,
max,
min,
0
);
cutlass::reference::host::TensorFillRandomUniform(
B.host_view(),
seed + 17,
max,
min,
0
);
#if 0 // Debug: fill A sequentially and B as Identity matrix for debugging
cutlass::reference::host::BlockFillSequential(
A.host_view().data(), A.host_view().capacity());
cutlass::reference::host::TensorFillIdentity(B.host_view());
#endif
cutlass::reference::host::TensorFillRandomUniform(
C.host_view(),
seed + 31,
max,
min,
0
);
A.sync_device();
B.sync_device();
C.sync_device();
D.sync_device();
dim3 grid(1, 1);
dim3 block(32, 1, 1);
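  // A single 32-thread block is launched: the kernel exercises exactly one warp,
  // matching the warp-scoped GEMM operator defined above.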
float alpha = 1.0f;
float beta = 0.0f;
kernel<<< grid, block >>>(
D.device_data(),
alpha,
A.device_data(),
B.device_data(),
beta,
C.device_data()
);
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Failed to synchronize device after kernel launch." << std::endl;
return -1;
}
D.sync_host();
// Compute reference on host
cutlass::HostTensor<float, cutlass::layout::RowMajor> D_ref({kM, kN}, false);
cutlass::reference::host::TensorCopy(D_ref.host_view(), C.host_view());
cutlass::reference::host::Gemm<
float, cutlass::layout::RowMajor,
float, cutlass::layout::ColumnMajor,
float, cutlass::layout::RowMajor,
float, float> reference_gemm;
reference_gemm(
{kM, kN, kK},
alpha,
A.host_ref(),
B.host_ref(),
beta,
D_ref.host_ref(),
float()
);
// Verify reference matches computed
if (!cutlass::reference::host::TensorEquals(
D.host_view(),
D_ref.host_view())) {
std::cerr
<< "A =\n" << A.host_view()
<< "\n\nB = \n" << B.host_view()
<< "\n\nC = " << C.host_view()
<< "\n\nRef =\n" << D_ref.host_view()
<< "\n\nD =\n" << D.host_view() << "\n\n";
std::cerr << "Error - device results mismatch host reference." << std::endl;
return -1;
}
std::cout << "Passed" << std::endl;
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| examples/20_simt_canonical/simt_canonical.cu/0 | {
"file_path": "examples/20_simt_canonical/simt_canonical.cu",
"repo_id": "examples",
"token_count": 4815
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief GEMM kernel to support the epilogue visitor model
for customized softmax partial reduction epilogue fusion.
This source file will likely be moved to `include/cutlass/gemm/kernel/` in the future once
its usage has been stabilized. For now, it is included in this example to demonstrate
some basic output fusion options.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/complex.h"
#include "cutlass/semaphore.h"
#include "cutlass/trace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_ ///! Threadblock swizzling function
>
struct GemmWithEpilogueVisitor {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueVisitor = typename Epilogue::Visitor;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using ElementA = typename Mma::IteratorA::Element;
using LayoutA = typename Mma::IteratorA::Layout;
using TensorRefA = TensorRef<ElementA, LayoutA>;
using ElementB = typename Mma::IteratorB::Element;
using LayoutB = typename Mma::IteratorB::Layout;
using TensorRefB = TensorRef<ElementB, LayoutB>;
using ElementC = typename EpilogueVisitor::ElementOutput;
using LayoutC = typename Epilogue::Layout;
using TensorRefC = TensorRef<ElementC, LayoutC>;
static ComplexTransform const kTransformA = Mma::kTransformA;
static ComplexTransform const kTransformB = Mma::kTransformB;
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
using ElementNorm = typename EpilogueVisitor::ElementNorm;
using ElementSum = typename EpilogueVisitor::ElementSum;
static int const kStages = Mma::kStages;
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = EpilogueVisitor::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Split-K preserves splits that are 128b aligned
static int const kSplitKAlignment = const_max(
128 / sizeof_bits<ElementA>::value,
128 / sizeof_bits<ElementB>::value
);
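  // For example, with half-precision A and B operands this evaluates to
  // max(128 / 16, 128 / 16) = 8 elements.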
//
// Structures
//
/// Argument structure
struct Arguments {
//
// Data members
//
GemmUniversalMode mode;
GemmCoord problem_size;
int batch_count;
TensorRefA ref_A;
TensorRefB ref_B;
TensorRefC ref_C;
TensorRefC ref_D;
ElementNorm *ptr_Max;
ElementSum *ptr_Sum;
int64_t batch_stride_A;
int64_t batch_stride_B;
typename EpilogueVisitor::Arguments epilogue_visitor;
//
// Methods
//
Arguments():
mode(GemmUniversalMode::kGemm),
batch_count(1)
{ }
/// constructs an arguments structure
Arguments(
GemmUniversalMode mode_,
GemmCoord problem_size_,
int batch_count_,
TensorRefA ref_A_,
TensorRefB ref_B_,
TensorRefC ref_C_,
TensorRefC ref_D_,
ElementNorm *ptr_Max_,
ElementSum *ptr_Sum_,
int64_t batch_stride_A_,
int64_t batch_stride_B_,
typename EpilogueVisitor::Arguments epilogue_visitor_
):
mode(mode_),
problem_size(problem_size_),
batch_count(batch_count_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
ptr_Max(ptr_Max_),
ptr_Sum(ptr_Sum_),
batch_stride_A(batch_stride_A_),
batch_stride_B(batch_stride_B_),
epilogue_visitor(epilogue_visitor_)
{
}
};
//
// Structure for precomputing values in host memory and passing to kernels
//
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorB::Params params_B;
typename EpilogueVisitor::OutputTileIterator::Params params_C;
typename EpilogueVisitor::OutputTileIterator::Params params_D;
GemmUniversalMode mode;
int batch_count;
int gemm_k_size;
void * ptr_A;
void * ptr_B;
ElementC * ptr_C;
ElementC * ptr_D;
ElementNorm * ptr_Max;
ElementSum * ptr_Sum;
int64_t batch_stride_A;
int64_t batch_stride_B;
typename EpilogueVisitor::Params epilogue_visitor;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
swizzle_log_tile(0),
params_A(0),
params_B(0),
params_C(0),
params_D(0),
      mode(cutlass::gemm::GemmUniversalMode::kGemm),
      batch_count(0),
      gemm_k_size(0),
ptr_A(nullptr),
ptr_B(nullptr),
ptr_C(nullptr),
ptr_D(nullptr),
ptr_Max(nullptr),
ptr_Sum(nullptr),
batch_stride_A(0),
batch_stride_B(0)
{ }
Params(
Arguments const &args
):
problem_size(args.problem_size),
swizzle_log_tile(0),
params_A(args.ref_A.layout()),
params_B(args.ref_B.layout()),
params_C(args.ref_C.layout()),
params_D(args.ref_D.layout()),
mode(args.mode),
batch_count(args.batch_count),
gemm_k_size(args.problem_size.k()),
ptr_A(args.ref_A.data()),
ptr_B(args.ref_B.data()),
ptr_C(args.ref_C.data()),
ptr_D(args.ref_D.data()),
ptr_Max(args.ptr_Max),
ptr_Sum(args.ptr_Sum),
batch_stride_A(args.batch_stride_A),
batch_stride_B(args.batch_stride_B),
epilogue_visitor(args.epilogue_visitor)
{
ThreadblockSwizzle threadblock_swizzle;
grid_tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.batch_count);
if (args.mode == GemmUniversalMode::kGemm || args.mode == GemmUniversalMode::kGemmSplitKParallel) {
int const kAlignK = const_max(const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value), 1);
gemm_k_size = round_up(ceil_div(args.problem_size.k(), args.batch_count), kAlignK);
if (gemm_k_size) {
grid_tiled_shape.k() = ceil_div(args.problem_size.k(), gemm_k_size);
}
}
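      // Worked example (illustrative numbers): for half-precision A and B,
      // kAlignK = max(128 / 16, 128 / 16) = 8. With K = 1000 split 3 ways,
      // ceil_div(1000, 3) = 334 rounds up to gemm_k_size = 336, and the grid
      // gets ceil_div(1000, 336) = 3 K-partitions covering 336 / 336 / 328.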
swizzle_log_tile = threadblock_swizzle.get_log_tile(grid_tiled_shape);
}
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
struct {
typename Epilogue::SharedStorage epilogue;
typename EpilogueVisitor::SharedStorage visitor;
} epilogue;
};
public:
//
// Methods
//
CUTLASS_DEVICE
GemmWithEpilogueVisitor() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size) {
CUTLASS_TRACE_HOST("GemmWithEpilogueVisitor::can_implement()");
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
bool isAMisaligned = false;
bool isBMisaligned = false;
bool isCMisaligned = false;
if (platform::is_same<LayoutA, layout::RowMajor>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) {
isAMisaligned = problem_size.m() % kAlignmentA;
} else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) {
isAMisaligned = problem_size.k() % kAlignmentA;
}
if (platform::is_same<LayoutB, layout::RowMajor>::value) {
isBMisaligned = problem_size.n() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
} else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value
|| platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) {
isBMisaligned = problem_size.k() % kAlignmentB;
}
if (platform::is_same<LayoutC, layout::RowMajor>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) {
isCMisaligned = problem_size.m() % kAlignmentC;
} else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value
|| platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) {
isCMisaligned = problem_size.n() % kAlignmentC;
}
if (isAMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for A operand");
return Status::kErrorMisalignedOperand;
}
if (isBMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for B operand");
return Status::kErrorMisalignedOperand;
}
if (isCMisaligned) {
CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for C operand");
return Status::kErrorMisalignedOperand;
}
CUTLASS_TRACE_HOST(" returning kSuccess");
return Status::kSuccess;
}
static Status can_implement(Arguments const &args) {
return can_implement(args.problem_size);
}
#define SPLIT_K_ENABLED 1
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
int offset_k = 0;
int problem_size_k = params.problem_size.k();
ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A);
ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B);
#if SPLIT_K_ENABLED
//
// Fetch pointers based on mode.
//
if (params.mode == GemmUniversalMode::kGemm ||
params.mode == GemmUniversalMode::kGemmSplitKParallel) {
if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) {
problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size;
}
offset_k = threadblock_tile_offset.k() * params.gemm_k_size;
}
else if (params.mode == GemmUniversalMode::kBatched) {
ptr_A += threadblock_tile_offset.k() * params.batch_stride_A;
ptr_B += threadblock_tile_offset.k() * params.batch_stride_B;
}
else if (params.mode == GemmUniversalMode::kArray) {
ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()];
ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()];
}
#endif
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
offset_k,
};
cutlass::MatrixCoord tb_offset_B{
offset_k,
threadblock_tile_offset.n() * Mma::Shape::kN
};
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A and B operands
typename Mma::IteratorA iterator_A(
params.params_A,
ptr_A,
{params.problem_size.m(), problem_size_k},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
ptr_B,
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
// Number of k iterations for the threadblock-scoped mainloop
int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute threadblock-scoped matrix multiply-add
mma(
gemm_k_iterations,
accumulators,
iterator_A,
iterator_B,
accumulators);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Assume identity swizzle
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
//
// Construct the epilogue visitor
//
EpilogueVisitor epilogue_visitor(
params.epilogue_visitor,
shared_storage.epilogue.visitor,
params.problem_size.mn(),
thread_idx,
warp_idx,
lane_idx,
params.params_C,
params.params_D,
params.ptr_C,
params.ptr_D,
params.ptr_Max,
params.ptr_Sum,
threadblock_offset,
blockIdx.y * params.problem_size.m());
if (params.mode == GemmUniversalMode::kGemm) {
// Indicate which position in a serial reduction the output operator is currently updating
epilogue_visitor.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
else if (params.mode == GemmUniversalMode::kBatched || params.mode == GemmUniversalMode::kArray) {
epilogue_visitor.set_batch_index(threadblock_tile_offset.k());
}
// Construct the epilogue
Epilogue epilogue(
shared_storage.epilogue.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Execute the epilogue operator to update the destination tensor.
epilogue(epilogue_visitor, accumulators);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/35_gemm_softmax/gemm_with_epilogue_visitor.h/0 | {
"file_path": "examples/35_gemm_softmax/gemm_with_epilogue_visitor.h",
"repo_id": "examples",
"token_count": 6415
} | 6 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <vector>
#include <iostream>
#include <fstream>
#include "kernel_backward.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/host_tensor.h"
using Arch = cutlass::arch::Sm80;
static constexpr int kMaxK = 128;
template <typename ArchTag, typename Element, int kMaxK>
struct DefaultKernel {
// Some heuristics to select the best kernel (tested on Sm60, Sm70, Sm80)
// NOTE: Requires quite a lot of shmem for Sm80+,
// so might require tweaking those manually for Sm86/Sm89
static constexpr bool kSupports64x128 =
ArchTag::kMinComputeCapability >= 80 ||
(ArchTag::kMinComputeCapability >= 70 &&
cutlass::sizeof_bits<Element>::value <= 16);
static constexpr int kBlockSizeI = kSupports64x128 && kMaxK > 64 ? 128 : 64;
static constexpr bool kIsHalf = cutlass::sizeof_bits<Element>::value <= 16;
static constexpr bool kOutputInRF = kIsHalf && kMaxK <= kBlockSizeI;
static constexpr bool kPreload = kIsHalf && ArchTag::kMinComputeCapability >= 80 && kOutputInRF;
static constexpr int kBlockSizeJ = kPreload && kMaxK > 64 ? 128 : 64;
using Kernel = AttentionBackwardKernel<
Arch,
Element,
true, // kIsAligned_
false, // kApplyDropout_
kPreload, // kPreload_
kBlockSizeI, // kBlockSizeI_,
kBlockSizeJ, // kBlockSizeJ_,
kMaxK, // kMaxK
false, // kKeysQueriesAlignedToBlockSize
true // kEnableSplitKeys
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace {
template <typename T> struct TypeName;
template <> struct TypeName<float> { static constexpr const char* Name = "f32"; };
template <> struct TypeName<cutlass::half_t> { static constexpr const char* Name = "f16"; };
template <> struct TypeName<cutlass::bfloat16_t> { static constexpr const char* Name = "b16"; };
void readExpect(std::string const& expected) {
std::string read;
std::cin >> read;
if (read != expected) {
std::cerr << "FATAL: Read '" << read << "' but expected '" << expected << "'" << std::endl;
std::exit(1);
}
}
/// Helpers to read from stdin
template <typename Element>
cutlass::HostTensor<Element, cutlass::layout::RowMajor> readTensorOnDevice(std::string const& expectedName) {
readExpect("tensor_begin");
readExpect(std::string(TypeName<Element>::Name) + ":" + expectedName);
uint64_t len = 0;
std::cin >> len;
readExpect("file");
std::string filename;
std::cin >> filename;
cutlass::HostTensor<Element, cutlass::layout::RowMajor> tensor({int64_t(1), int64_t(len / sizeof(Element))});
uint8_t* data = (uint8_t*)tensor.host_data();
std::fstream myFile(filename, std::ios::in | std::ios::binary );
myFile.read((char*)data, len);
readExpect("tensor_end");
tensor.sync_device();
return tensor;
}
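// For reference, the stdin framing consumed above is whitespace-separated tokens of the
// form (values here are illustrative only):
//   tensor_begin f16:query 262144 file /tmp/query.bin tensor_end
// i.e. a type-tagged tensor name, the byte length, and a path to a binary file with the data.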
int64_t readInt64(std::string const& expectedName) {
readExpect(expectedName);
int64_t s = 0;
std::cin >> s;
return s;
}
float readFloat(std::string const& expectedName) {
readExpect(expectedName);
float s = 0;
std::cin >> s;
return s;
}
// Writing
template <typename Element>
void writeTensor(std::string const& name, cutlass::HostTensor<Element, cutlass::layout::RowMajor>& tensor) {
tensor.sync_host(); // device->host
size_t u8len = tensor.size() * sizeof(Element);
// Python is expected to provide a file name to write to
readExpect("tmpfile");
std::string tmpfile;
std::cin >> tmpfile;
uint8_t* data = (uint8_t*)tensor.host_data();
std::fstream myFile(tmpfile, std::ios::out | std::ios::binary );
myFile.write((char*)data, u8len);
myFile.close();
std::cout << "tensor_begin " << TypeName<Element>::Name << ":" << name << " ";
std::cout << u8len << " file " << tmpfile << " tensor_end" << std::endl;
}
void writeInt64(std::string const& name, int64_t value) {
std::cout << name << " " << value << std::endl;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element>
int runKernel() {
using Kernel = typename DefaultKernel<Arch, Element, kMaxK>::Kernel;
#define READ_I64(NAME) p.NAME = (decltype(p.NAME))readInt64(#NAME)
#define READ_TENSOR_AND_STRIDES_BMH(DT, NAME, NAME_XS) \
auto storage##NAME = readTensorOnDevice<DT>(#NAME); \
p.NAME##_ptr = storage##NAME.device_data(); \
READ_I64(NAME_XS##_strideB); \
READ_I64(NAME_XS##_strideM); \
READ_I64(NAME_XS##_strideH);
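// As a rough sketch, READ_TENSOR_AND_STRIDES_BMH(Element, query, q) expands to:
//   auto storagequery = readTensorOnDevice<Element>("query");
//   p.query_ptr = storagequery.device_data();
//   p.q_strideB = (decltype(p.q_strideB))readInt64("q_strideB");   // via READ_I64
//   ...and likewise for q_strideM and q_strideH.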
#define CUDA_CHECK(FN) { \
auto cudaError = FN; \
if (cudaError != cudaSuccess) { \
std::cerr << "FATAL: " #FN " failed: " << cudaGetErrorString(cudaError) << std::endl; \
return -1; \
} \
}
typename Kernel::Params p;
p.scale = readFloat("scale");
READ_I64(head_dim);
READ_I64(head_dim_value);
READ_I64(num_queries);
READ_I64(num_keys);
READ_I64(num_heads);
READ_I64(custom_mask_type);
READ_I64(num_batches);
int64_t repeat_count = readInt64("repeat_count");
READ_I64(num_splits_key);
READ_TENSOR_AND_STRIDES_BMH(Element, query, q);
READ_TENSOR_AND_STRIDES_BMH(Element, key, k);
READ_TENSOR_AND_STRIDES_BMH(Element, value, v);
auto lse = readTensorOnDevice<typename Kernel::lse_scalar_t>("logsumexp");
p.logsumexp_ptr = lse.device_data();
p.lse_strideB = readInt64("lse_strideB");
p.lse_strideH = readInt64("lse_strideH");
// output
auto stOutput = readTensorOnDevice<Element>("output");
p.output_ptr = stOutput.device_data();
READ_I64(o_strideB);
auto o_strideM = readInt64("o_strideM");
if (o_strideM != p.o_strideM()) {
std::cerr << "Invalid `o_strideM`: " << o_strideM << " - expected " << p.o_strideM();
return 2;
}
READ_I64(o_strideH);
READ_TENSOR_AND_STRIDES_BMH(Element, grad_output, gO);
auto stDelta = readTensorOnDevice<typename Kernel::accum_t>("delta");
p.delta_ptr = stDelta.device_data();
READ_I64(delta_strideB);
READ_I64(delta_strideH);
// Allocate workspace
if (p.workspace_size()) {
CUDA_CHECK(cudaMalloc(&p.workspace, p.workspace_size()));
}
// Allocate outputs in BMHK format
p.gQKV_strideM_multiplier = 1;
p.gQ_strideH = p.head_dim;
p.gQ_strideB = p.gQ_strideM() * p.num_queries;
p.gK_strideH = p.head_dim;
p.gK_strideB = p.gK_strideM() * p.num_keys;
p.gV_strideH = p.head_dim_value;
p.gV_strideB = p.gV_strideM() * p.num_keys;
cutlass::HostTensor<Element, cutlass::layout::RowMajor> gQ({int64_t(1), p.gQ_strideB * p.num_batches});
cutlass::HostTensor<Element, cutlass::layout::RowMajor> gK({int64_t(1), p.gK_strideB * p.num_batches});
cutlass::HostTensor<Element, cutlass::layout::RowMajor> gV({int64_t(1), p.gV_strideB * p.num_batches});
p.grad_query_ptr = gQ.device_data();
p.grad_key_ptr = gK.device_data();
p.grad_value_ptr = gV.device_data();
if (!Kernel::check_supported(p)) {
std::cerr << "FATAL: Kernel does not support these inputs" << std::endl;
return 2;
}
// Run kernel
cudaDeviceSynchronize();
auto kernel_fn = attention_kernel_backward_batched_impl<Kernel>;
size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
CUDA_CHECK(cudaFuncSetAttribute(kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, int(smem_bytes)));
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p);
// Write outputs
std::cout << "OK ";
writeTensor("grad_query", gQ);
writeInt64("gQ_strideB", p.gQ_strideB);
writeInt64("gQ_strideM", p.gQ_strideM());
writeInt64("gQ_strideH", p.gQ_strideH);
writeTensor("grad_key", gK);
writeInt64("gK_strideB", p.gK_strideB);
writeInt64("gK_strideM", p.gK_strideM());
writeInt64("gK_strideH", p.gK_strideH);
writeTensor("grad_value", gV);
writeInt64("gV_strideB", p.gV_strideB);
writeInt64("gV_strideM", p.gV_strideM());
writeInt64("gV_strideH", p.gV_strideH);
// Timing
cudaEvent_t events[2];
for (auto & event : events) {
CUDA_CHECK(cudaEventCreate(&event));
}
CUDA_CHECK(cudaEventRecord(events[0]));
for (int i = 0; i < repeat_count; ++i) {
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p);
}
CUDA_CHECK(cudaEventRecord(events[1]));
CUDA_CHECK(cudaEventSynchronize(events[1]));
// Measure elapsed runtime
float runtime_ms = 0;
CUDA_CHECK(cudaEventElapsedTime(&runtime_ms, events[0], events[1]));
std::cout << "runtime_ms " << runtime_ms / float(repeat_count) << std::endl;
return 0;
}
int main() {
std::ios_base::sync_with_stdio(false);
std::string dtype;
std::cin >> dtype;
std::cerr << "Running kernel with dtype: " << dtype << std::endl;
if (dtype == "f16") {
return runKernel<cutlass::half_t>();
} else if (dtype == "b16") {
return runKernel<cutlass::bfloat16_t>();
} else if (dtype == "f32") {
return runKernel<float>();
} else {
std::cerr << "FATAL: Unknown dtype: " << dtype << std::endl;
return 3;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/41_fused_multi_head_attention/fused_multi_head_attention_backward.cu/0 | {
"file_path": "examples/41_fused_multi_head_attention/fused_multi_head_attention_backward.cu",
"repo_id": "examples",
"token_count": 4331
} | 7 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This defines a "fragment" iterator for visiting the fragments of an accumulator tile
that participate in one warp-level store operation.
Typically, the accumulator tile is the largest single block of register-backed storage
within the kernel. Storing it to memory is best accomplished by partitioning it into
smaller tiles and storing these sequentially.
Round trips through shared memory during the Epilogue phase require partitioning, as
shared memory capacity is typically insufficient for a threadblock's total accumulator
size.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/tensor_op_policy.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
///
template <
typename WarpShape, ///< shape of warp-level GEMM (concept: MatrixShape)
typename OperatorShape, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorElementC, ///< matrix multiply operation data type (concept: data type)
typename OperatorFragmentC, ///< matrix multiply operation fragment (concept: Array)
typename Layout ///< target shared memory layout
>
class FusedBiasActFragmentIteratorTensorOp;
////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for row-major shared memory
template <
typename WarpShape_, ///< shape of the warp-level GEMM tile
typename OperatorShape_, ///< matrix multiply operation shape (concept: gemm::GemmShape)
typename OperatorElementC_, ///< matrix multiply operation data type (concept: data type)
typename OperatorFragmentC_ ///< matrix multiply operation fragment (concept: Array)
>
class FusedBiasActFragmentIteratorTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_, layout::RowMajor> {
public:
using WarpShape = WarpShape_;
using OperatorShape = OperatorShape_;
using OperatorElementC = OperatorElementC_;
using OperatorFragmentC = OperatorFragmentC_;
using Layout = layout::RowMajor;
using Policy = TensorOpPolicy<WarpShape, OperatorShape, Layout>;
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<
OperatorElementC,
Policy::OperatorCount::kColumn * Policy::kElementsPerAccess>;
/// This is the complete warp-level accumulator tile.
using AccumulatorTile = Array<
OperatorElementC,
OperatorFragmentC::kElements * Policy::OperatorCount::kRow * Policy::OperatorCount::kColumn>;
using OutputAccumulatorTile = AccumulatorTile;
/// Number of times this iterator can be incremented
static int const kIterations = Policy::kIterations;
private:
/// Internal access type
using AccessType = Array<OperatorElementC, Policy::kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType *accumulators_;
/// Internal index
int index_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
FusedBiasActFragmentIteratorTensorOp(AccumulatorTile &accum):
accumulators_(reinterpret_cast<AccessType *>(&accum)),
index_(0) {
}
/// Increments
CUTLASS_HOST_DEVICE
FusedBiasActFragmentIteratorTensorOp &operator++() {
++index_;
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
FusedBiasActFragmentIteratorTensorOp &operator--() {
--index_;
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, int index_offset = 0) const {
int index = index_ + index_offset;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int accumulator_access_offset =
index + n * Policy::kAccumulatorColumnStride / Policy::kElementsPerAccess;
frag_ptr[n] = accumulators_[accumulator_access_offset];
}
}
/// Stores a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void store(Fragment &frag, int index_offset = 0) const {
int index = index_ + index_offset;
AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::OperatorCount::kColumn; ++n) {
int accumulator_access_offset =
index + n * Policy::kAccumulatorColumnStride / Policy::kElementsPerAccess;
accumulators_[accumulator_access_offset] = frag_ptr[n];
}
}
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/warp/fused_bias_act_fragment_iterator_tensor_op.h/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/fixed_impl/epilogue/warp/fused_bias_act_fragment_iterator_tensor_op.h",
"repo_id": "examples",
"token_count": 1960
} | 8 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#define TI(tag) \
cudaEvent_t _event_start_ ##tag; \
cudaEvent_t _event_end_ ##tag; \
float _event_time_ ##tag; \
cudaEventCreate(& _event_start_ ##tag); \
cudaEventCreate(& _event_end_ ##tag); \
cudaEventRecord(_event_start_ ##tag);
#define TO(tag, str, times) \
cudaEventRecord(_event_end_ ##tag); \
cudaEventSynchronize(_event_end_ ##tag); \
cudaEventElapsedTime(&_event_time_ ##tag, _event_start_ ##tag, _event_end_ ##tag); \
float _event_time_once_ ##tag = _event_time_ ##tag / times; \
printf("%20s:\t %10.3fus\t", str, _event_time_once_ ##tag * 1000); \
cudaDeviceSynchronize(); \
printf("%20s string: %s\n",str, cudaGetErrorString(cudaGetLastError()));
template<typename T>
struct memory_unit{
T* host_ptr;
T* device_ptr;
int size_bytes;
int elements;
void h2d(){
cudaMemcpy(device_ptr, host_ptr, size_bytes, cudaMemcpyHostToDevice);
}
void d2h(){
cudaMemcpy(host_ptr, device_ptr, size_bytes, cudaMemcpyDeviceToHost);
}
void free_all(){
free(host_ptr);
cudaFree(device_ptr);
}
memory_unit(int elements_): size_bytes(elements_ * sizeof(T)), elements(elements_){
host_ptr = (T*) malloc(elements_ * sizeof(T));
cudaMalloc((void**)&device_ptr, elements_ * sizeof(T));
}
void init(int abs_range = 1){
for(int i = 0; i < elements; i++){
host_ptr[i] = T(rand() % 100 / float(100) * 2 * abs_range - abs_range);
}
h2d();
}
};
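// Minimal usage sketch for memory_unit (sizes and kernel are placeholders):
//   memory_unit<float> A(M * K);
//   A.init();        // random host fill in roughly [-1, 1), then host->device copy
//   some_kernel<<<grid, block>>>(A.device_ptr);
//   A.d2h();         // copy results back for host-side checking
//   A.free_all();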
template<typename T>
int check_result(T * a, T * b, int N){
int cnt = 0;
for(int i = 0; i < N; i ++){
float std = float(a[i]);
float my = float(b[i]);
if(abs(std - my) / abs(std) > 1e-2)
{
// printf("my: %f , std: %f\n", my, std);
cnt++;
}
}
printf("total err: %d / %d\n", cnt, N);
return cnt;
}
| examples/44_multi_gemm_ir_and_codegen/utils.h/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/utils.h",
"repo_id": "examples",
"token_count": 1365
} | 9 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/***************************************************************************************************
Example contrasting the Stream-K parallel decomposition for GEMM threadblocks versus the
"classic data-parallel" and "Split-K" decompositions + residual add.
For more details regarding the Stream-K method, see "Stream-K: Work-centric Parallel Decomposition
for Dense Matrix-Matrix Multiplication on the GPU" (https://arxiv.org/abs/2301.03598)
Requires NVIDIA Ampere or newer device (SM80+).
- To lock persistence mode, power (400W), clocks (1005MHz) for evaluation (assumes device 0 and A100)
cutlass$ sudo nvidia-smi -pm 1 -i 0
cutlass$ sudo nvidia-smi -i 0 -pl 400
cutlass$ sudo nvidia-smi -i 0 -lgc 1005
- Build and run:
cutlass$ mkdir build
cutlass$ cd build
cutlass/build$ cmake .. -DCUTLASS_NVCC_ARCHS=80
cutlass/build$ make 47_ampere_gemm_universal_streamk_broadcast
cutlass/build$ ./examples/47_ampere_gemm_universal_streamk/47_ampere_gemm_universal_streamk_broadcast
- Reset clocks when done:
cutlass$ sudo nvidia-smi -rgc
**************************************************************************************************/
#include <iostream>
#include <string>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/gemm/device/gemm_universal_with_broadcast.h"
#include "cutlass/gemm/device/gemm_universal_streamk_with_broadcast.h"
#include "cutlass/epilogue/thread/linear_combination_residual_block.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_foreach.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/epilogue/threadblock/fusion/visitors.hpp"
#include "cutlass/gemm/kernel/default_gemm_universal_with_visitor.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM kernel configurations (cutlass_tensorop_h16816gemm_128x128_32x4_nn_align8)
/////////////////////////////////////////////////////////////////////////////////////////////////
// A matrix configuration
using ElementA = cutlass::half_t; // Element type for A matrix operand
using LayoutA = cutlass::layout::RowMajor; // Layout type for A matrix operand
constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; // Memory access granularity/alignment of A matrix in units of elements (up to 16 bytes)
// B matrix configuration
using ElementB = cutlass::half_t; // Element type for B matrix operand
using LayoutB = cutlass::layout::RowMajor; // Layout type for B matrix operand
constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; // Memory access granularity/alignment of B matrix in units of elements (up to 16 bytes)
// C1/C2/D matrix configuration
using ElementC = cutlass::half_t; // Element type for C matrix operands
using LayoutC = cutlass::layout::RowMajor; // Layout type for C matrix operands
constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value; // Memory access granularity/alignment of C matrices in units of elements (up to 16 bytes)
// Output matrix configuration
using ElementOutput = cutlass::half_t; // Element type for output matrix operands
using LayoutOutput = cutlass::layout::RowMajor; // Layout type for output matrix operands
// constexpr int AlignmentOutput = 128 / cutlass::sizeof_bits<ElementOutput>::value; // Memory access granularity/alignment of output matrices in units of elements (up to 16 bytes)
// Multiply-accumulate blocking/pipelining details
using ElementAccumulator = cutlass::half_t; // Element type for internal accumulation
using ElementCompute = cutlass::half_t; // Element type for compute
using ArchTag = cutlass::arch::Sm80; // Tag indicating the minimum SM that supports the intended feature
using OperatorClass = cutlass::arch::OpClassTensorOp; // Operator class tag
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock-level tile size (concept: GemmShape)
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp-level tile size (concept: GemmShape)
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // Instruction-level tile size (concept: GemmShape)
constexpr int NumStages = 4; // Number of global->shared pipeline stages used in the GEMM mainloop
constexpr int EVTEpilogueStages = 1; // Number of epilogue stages in EVT
// Residual block configuration
// Epilogue output operator
/// Using LinearCombinationResidualBlock
/// Models a residual block of the form: UnaryOp(BinaryOp(BinaryOp(ActivationOp(TensorOp(X) + bias), residual1), residual2))
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationResidualBlock<
ElementOutput, // Element type for output matrix
ElementAccumulator, // Element type from internal accumulation
ElementCompute, // Element type from internal accumulation
ElementC, // Element type for C1/C2/D matrix operands
AlignmentC, // Memory access granularity of C and D matrix in units of elements
cutlass::epilogue::thread::Identity, // Activation
cutlass::plus, // Binary operation 1
cutlass::epilogue::thread::Identity, // Unary operation
cutlass::plus // Binary operation 2
>;
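// With Identity activations and plus for both binary ops, the form above reduces to an
// elementwise residual add over the accumulator, bias vector, and the two residual inputs
// (see the host-side reference near the end of this file, which computes alpha * A*B + beta * C1
// and then adds the broadcast vector and C2).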
// Reference device GEMM implementation type
using DeviceGemmReference = cutlass::reference::device::Gemm<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
ElementAccumulator>;
// Classic data-parallel device GEMM implementation type
using DeviceGemmBasic = cutlass::gemm::device::GemmUniversalWithBroadcast<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
NumStages,
AlignmentA,
AlignmentB>;
// StreamK device GEMM implementation type with EVT
using namespace cute;
using OutputTileThreadMap = cutlass::epilogue::threadblock::OutputTileThreadLayout<
ThreadblockShape,
WarpShape,
ElementC,
AlignmentC,
EVTEpilogueStages
>;
using Accum = cutlass::epilogue::threadblock::VisitorAccFetch;
using Bias = cutlass::epilogue::threadblock::VisitorRowBroadcast<
OutputTileThreadMap, ElementC,
cute::Stride<_0, _1, int32_t> // StrideMNL
>;
using C1 = cutlass::epilogue::threadblock::VisitorAuxLoad<
OutputTileThreadMap, ElementC,
cute::Stride<int64_t, _1, int64_t> // StrideMNL
>;
using C2 = cutlass::epilogue::threadblock::VisitorAuxLoad<
OutputTileThreadMap, ElementC,
cute::Stride<int64_t, _1, int64_t> // StrideMNL
>;
using Compute0 = cutlass::epilogue::threadblock::VisitorCompute<
cutlass::plus, ElementCompute, ElementCompute,
cutlass::FloatRoundStyle::round_to_nearest
>;
using EVTCompute0 = cutlass::epilogue::threadblock::Sm80EVT<
Compute0,
Accum,
Bias>;
using Compute1 = cutlass::epilogue::threadblock::VisitorCompute<
cutlass::plus, ElementCompute, ElementCompute,
cutlass::FloatRoundStyle::round_to_nearest
>;
using EVTCompute1 = cutlass::epilogue::threadblock::Sm80EVT<
Compute1,
EVTCompute0,
C1>;
using Compute2 = cutlass::epilogue::threadblock::VisitorCompute<
cutlass::plus, ElementOutput, ElementCompute,
cutlass::FloatRoundStyle::round_to_nearest
>;
using EVTCompute2 = cutlass::epilogue::threadblock::Sm80EVT<
Compute2,
EVTCompute1,
C2>;
using D = cutlass::epilogue::threadblock::VisitorAuxStore<
OutputTileThreadMap, ElementOutput, cutlass::FloatRoundStyle::round_to_nearest,
cute::Stride<int64_t, _1, int64_t> // StrideMNL
>;
using EVTD = cutlass::epilogue::threadblock::Sm80EVT<
D,
EVTCompute2>;
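// Read bottom-up, this visitor tree evaluates, per output element:
//   D = ((accum + bias) + C1) + C2
// i.e. the same residual-add epilogue as EpilogueOp above, expressed as an EVT graph whose
// leaf arguments are filled in by args_from_options() for the Stream-K kernel.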
using EVTKernelStreamK =
typename cutlass::gemm::kernel::DefaultGemmWithVisitor<
ElementA, LayoutA, cutlass::ComplexTransform::kNone, AlignmentA,
ElementB, LayoutB, cutlass::ComplexTransform::kNone, AlignmentB,
ElementC, LayoutC, AlignmentC,
ElementAccumulator,
ElementCompute,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
ThreadblockShape,
WarpShape,
InstructionShape,
EVTD,
cutlass::gemm::threadblock::ThreadblockSwizzleStreamK,
NumStages,
cutlass::arch::OpMultiplyAdd,
EVTEpilogueStages
>::GemmKernel;
using DeviceGemmStreamK = cutlass::gemm::device::GemmUniversalAdapter<EVTKernelStreamK>;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Testbed utility types
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result
{
double avg_runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
Result(
double avg_runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess)
:
avg_runtime_ms(avg_runtime_ms), gflops(gflops), status(status), error(error), passed(true)
{}
};
/// Command line options parsing
struct Options
{
std::string command_name;
bool help;
cutlass::gemm::GemmCoord problem_size;
float alpha;
float beta;
int split_k_factor;
int avail_sms;
int iterations;
bool real;
cutlass::HostTensor<ElementA, LayoutA> tensor_a;
cutlass::HostTensor<ElementB, LayoutB> tensor_b;
cutlass::HostTensor<ElementC, LayoutC> tensor_c1;
cutlass::HostTensor<ElementC, LayoutC> tensor_c2;
cutlass::HostTensor<ElementC, LayoutC> tensor_d;
cutlass::HostTensor<ElementC, LayoutC> tensor_ref_d;
cutlass::HostTensor<ElementC, LayoutC> tensor_Vector;
// cutlass::HostTensor<ElementC, LayoutC> tensor_Tensor;
Options(std::string command_name) :
command_name(command_name),
help(false),
problem_size({2048, 2048, 2048}),
alpha(1.0f),
beta(1.0f),
split_k_factor(1),
avail_sms(-1), // Number of device SMs to use is unlimited
iterations(10000),
real(false)
{}
bool valid() const
{
return true;
}
void parse(int argc, char const **args)
{
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("split", split_k_factor);
cmd.get_cmd_line_argument("iterations", iterations);
real = cmd.check_cmd_line_flag("real");
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const
{
out
<< "Performs a GEMM computation.\n"
<< "\n"
<< "Options:\n"
<< "\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --split=<int> Split-K factor to emulate\n\n"
<< " --real If specified, initializes with real values instead of whole numbers. Errors are to be expected.\n\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n\n";
out
<< "\n\nExamples:\n\n"
<< "$ " << command_name << " --m=1024 --n=512 --k=1024 --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const
{
// Two flops per multiply-add
return 2.0 * double(problem_size.product()) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM evaluation
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Populates a DeviceGemmBasic::Arguments structure from the given commandline options
typename DeviceGemmBasic::Arguments args_from_options(
const DeviceGemmBasic &device_gemm,
const Options &options,
cutlass::HostTensor<ElementA, LayoutA> &tensor_a,
cutlass::HostTensor<ElementB, LayoutB> &tensor_b,
cutlass::HostTensor<ElementC, LayoutC> &tensor_c1,
cutlass::HostTensor<ElementC, LayoutC> &tensor_c2,
cutlass::HostTensor<ElementC, LayoutC> &tensor_d,
cutlass::HostTensor<ElementC, LayoutC> &tensor_Vector /*,
cutlass::HostTensor<ElementC, LayoutC> &tensor_Tensor */
)
{
return typename DeviceGemmBasic::Arguments(
cutlass::gemm::GemmUniversalMode::kGemm, // universal mode
options.problem_size, // problem_size
options.split_k_factor, // batch count / splitk slices
{ // epilogue parameters
ElementAccumulator(options.alpha),
ElementAccumulator(options.beta)
},
tensor_a.device_data(), // ptr_A
tensor_b.device_data(), // ptr_B
tensor_c1.device_data(), // ptr_C1
tensor_c2.device_data(), // ptr_C2
tensor_d.device_data(), // ptr_D
tensor_Vector.device_data(), // ptr_Vector
/* tensor_Tensor.device_data(), */nullptr,// ptr_Tensor
options.problem_size.mk().product(), // batch_stride_A
options.problem_size.nk().product(), // batch_stride_B
options.problem_size.mn().product(), // batch_stride_C1
options.problem_size.mn().product(), // batch_stride_C2
options.problem_size.mn().product(), // batch_stride_D
options.problem_size.mn().product(), // batch_stride_Vector
options.problem_size.mn().product(), // batch_stride_Tensor
tensor_a.layout().stride(0), // stride_a
tensor_b.layout().stride(0), // stride_b
tensor_c1.layout().stride(0), // stride_c1
tensor_c2.layout().stride(0), // stride_c2
tensor_d.layout().stride(0), // stride_d
/*tensor_Vector.layout().stride(0)*/0, // stride_Vector
/*tensor_Tensor.layout().stride(0)*/0); // stride_Tensor
}
/// Populates a DeviceGemmStreamK::Arguments structure from the given commandline options
typename DeviceGemmStreamK::Arguments args_from_options(
const DeviceGemmStreamK &device_gemm,
const Options &options,
cutlass::HostTensor<ElementA, LayoutA> &tensor_a,
cutlass::HostTensor<ElementB, LayoutB> &tensor_b,
cutlass::HostTensor<ElementC, LayoutC> &tensor_c1,
cutlass::HostTensor<ElementC, LayoutC> &tensor_c2,
cutlass::HostTensor<ElementC, LayoutC> &tensor_d,
cutlass::HostTensor<ElementC, LayoutC> &tensor_Vector/*,
cutlass::HostTensor<ElementC, LayoutC> &tensor_Tensor*/
)
{
typename EVTD::Arguments callback_args{
{
{
{
{}, // Accum
{tensor_Vector.device_data(), ElementC(0), {_0{}, _1{}, int32_t(options.problem_size.n())}}, // Bias
{} // Compute0
}, // EVTCompute0
{tensor_c1.device_data(), ElementC(0), {options.problem_size.n(), _1{}, options.problem_size.mn().product()}}, // C1
{} // Compute1
}, // EVTCompute1
{tensor_c2.device_data(), ElementC(0), {options.problem_size.n(), _1{}, options.problem_size.mn().product()}}, // C2
{} // Compute2
}, // EVTCompute2
{tensor_d.device_data(), {options.problem_size.n(), _1{}, options.problem_size.mn().product()}}, // D
}; // EVTD
return typename DeviceGemmStreamK::Arguments(
cutlass::gemm::GemmUniversalMode::kGemm, // universal mode
options.problem_size, // problem_size
options.split_k_factor, // batch count / splitk slices
callback_args, // argument of EVT callbacks
tensor_a.device_data(), // ptr_A
tensor_b.device_data(), // ptr_B
nullptr, // ptr_C (unused)
nullptr, // ptr_D (unused)
options.problem_size.mk().product(), // batch_stride_A
options.problem_size.nk().product(), // batch_stride_B
0, // batch_stride_C (unused)
0, // batch_stride_D (unused)
tensor_a.layout().stride(0), // stride_a
tensor_b.layout().stride(0), // stride_b
0, // stride_c (unused)
0, // stride_d (unused)
options.avail_sms); // avail_sms
}
/// Execute a given example GEMM computation
template <typename DeviceGemmT>
Result run(std::string description, Options &options)
{
// Display test description
std::cout << std::endl << description << std::endl;
// Zero-initialize test output matrix D
cutlass::reference::host::TensorFill(options.tensor_d.host_view());
options.tensor_d.sync_device();
// Instantiate CUTLASS kernel depending on templates
DeviceGemmT device_gemm;
// Create a structure of gemm kernel arguments suitable for invoking an instance of DeviceGemmT
auto arguments = args_from_options(device_gemm, options,
options.tensor_a, options.tensor_b, options.tensor_c1, options.tensor_c2, options.tensor_d,
options.tensor_Vector/*, options.tensor_Tensor*/);
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = DeviceGemmT::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check the problem size is supported or not
CUTLASS_CHECK(device_gemm.can_implement(arguments));
// Initialize CUTLASS kernel with arguments and workspace pointer
CUTLASS_CHECK(device_gemm.initialize(arguments, workspace.get()));
// Correctness / Warmup iteration
CUTLASS_CHECK(device_gemm());
// Copy output data from CUTLASS and reference kernel to host for comparison
options.tensor_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
Result result;
result.passed = cutlass::reference::host::TensorEquals(
options.tensor_d.host_view(),
options.tensor_ref_d.host_view());
double err = cutlass::reference::host::TensorRelativeErrorMetric(
options.tensor_d.host_view(),
options.tensor_ref_d.host_view());
std::cout << " Disposition: " << (result.passed ? "Passed" : "Failed") << " \t Relative error: " << err << std::endl;
// Run profiling loop
if (options.iterations > 0)
{
GpuTimer timer;
timer.start();
for (int iter = 0; iter < options.iterations; ++iter) {
CUTLASS_CHECK(device_gemm());
}
timer.stop();
// Compute average runtime and GFLOPs.
float elapsed_ms = timer.elapsed_millis();
result.avg_runtime_ms = double(elapsed_ms) / double(options.iterations);
result.gflops = options.gflops(result.avg_runtime_ms / 1000.0);
std::cout << " Avg runtime: " << result.avg_runtime_ms << " ms" << std::endl;
std::cout << " GFLOPs: " << result.gflops << std::endl;
}
// TODO: uncomment when results match
//if (!result.passed) {
// exit(-1);
//}
return result;
}
/// Program entrypoint
int main(int argc, const char **argv)
{
// CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
// Returning zero so this test passes on older Toolkits. Its actions are no-op.
return 0;
}
// Current device must have compute capability at least 80
cudaDeviceProp props;
int current_device_id;
CUDA_CHECK(cudaGetDevice(¤t_device_id));
CUDA_CHECK(cudaGetDeviceProperties(&props, current_device_id));
if (!((props.major * 10 + props.minor) >= 80))
{
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
// Returning zero so this test passes on older Toolkits. Its actions are no-op.
return 0;
}
// Parse commandline options
Options options("ampere_streamk_broadcast_gemm");
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
std::cout <<
options.iterations << " timing iterations of " <<
options.problem_size.m() << " x " <<
options.problem_size.n() << " x " <<
options.problem_size.k() << " matrix-matrix multiply" << std::endl;
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
//
// Initialize GEMM datasets
//
// Initialize tensors using CUTLASS helper functions
options.tensor_a.resize(options.problem_size.mk()); // <- Create matrix A with dimensions M x K
options.tensor_b.resize(options.problem_size.kn()); // <- Create matrix B with dimensions K x N
options.tensor_c1.resize(options.problem_size.mn()); // <- Create matrix C1 with dimensions M x N
options.tensor_c2.resize(options.problem_size.mn()); // <- Create matrix C2 with dimensions M x N
options.tensor_d.resize(options.problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from CUTLASS kernel
options.tensor_ref_d.resize(options.problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from reference kernel
options.tensor_Vector.resize({1, options.problem_size.n()}); // <- Create broadcast vector with dimensions 1 x N
// options.tensor_Tensor.resize(options.problem_size.mn()); // <- Create T matrix with dimensions M x N
int _init_bits = options.real ? -1 : 0;
// Fill matrix A on host with uniform-random data [-2, 2]
cutlass::reference::host::TensorFillRandomUniform(
options.tensor_a.host_view(),
1,
ElementA(2),
ElementA(-2), _init_bits);
// Fill matrix B on host with uniform-random data [-2, 2]
cutlass::reference::host::TensorFillRandomUniform(
options.tensor_b.host_view(),
1,
ElementB(2),
ElementB(-2), _init_bits);
// Fill matrix C1 on host with uniform-random data [-2, 2]
cutlass::reference::host::TensorFillRandomUniform(
options.tensor_c1.host_view(),
1,
ElementC(2),
ElementC(-2), _init_bits);
// Fill matrix C2 on host with uniform-random data [-2, 2]
cutlass::reference::host::TensorFillRandomUniform(
options.tensor_c2.host_view(),
1,
ElementC(2),
ElementC(-2), _init_bits);
cutlass::reference::host::TensorFillRandomUniform(
options.tensor_Vector.host_view(),
1,
ElementC(2),
ElementC(-2), _init_bits);
//
// Compute reference output
//
// Copy data from host to GPU
options.tensor_a.sync_device();
options.tensor_b.sync_device();
options.tensor_c1.sync_device();
options.tensor_c2.sync_device();
options.tensor_Vector.sync_device();
// options.tensor_Tensor.sync_device();
// Zero-initialize reference output matrix D
cutlass::reference::host::TensorFill(options.tensor_ref_d.host_view());
options.tensor_ref_d.sync_device();
// Create instantiation for device reference gemm kernel
DeviceGemmReference gemm_reference;
// Launch device reference gemm kernel
gemm_reference(
options.problem_size,
ElementAccumulator(options.alpha),
options.tensor_a.device_ref(),
options.tensor_b.device_ref(),
ElementAccumulator(options.beta),
options.tensor_c1.device_ref(),
options.tensor_ref_d.device_ref());
// Wait for kernels to finish
CUDA_CHECK(cudaDeviceSynchronize());
// Copy output data from reference kernel to host for comparison
options.tensor_ref_d.sync_host();
// Add broadcast vector (without multiplier)
// This is only possible because BinaryOp is addition, and UnaryOps are identity.
// This makes the addition of broadcast vector commutable.
/// identity(plus(identity(alpha * (a * b) + v), beta * c)) ==
/// alpha * a * b + v + beta * c ==
/// (alpha * a * b + beta * c) + v ==
/// GEMM(a, b, c) + v
// Vector broadcast on host
for (int i=0; i < options.problem_size.m(); ++i) {
for (int j=0; j < options.problem_size.n(); ++j) {
options.tensor_ref_d.host_view().ref().at({i, j}) += options.tensor_Vector.host_view().ref().at({0, j});
options.tensor_ref_d.host_view().ref().at({i, j}) += options.tensor_c2.host_view().ref().at({i, j});
}
}
// Sync back with device just in case
options.tensor_ref_d.sync_device();
//
// Evaluate CUTLASS kernels
//
// Test default operation
if (options.split_k_factor == 1)
{
// Compare basic data-parallel version versus StreamK version using default load-balancing heuristics
Result basic_dp = run<DeviceGemmBasic>("Basic data-parallel GEMM", options);
Result streamk_default = run<DeviceGemmStreamK>("StreamK GEMM with default load-balancing", options);
printf(" Speedup vs Basic-DP: %.3f\n", (basic_dp.avg_runtime_ms / streamk_default.avg_runtime_ms));
// Show that StreamK can emulate basic data-parallel GEMM when we set the number of SMs to load-balance across = 1
options.avail_sms = 1; // Set load-balancing width to 1 SM (no load balancing)
Result streamk_dp = run<DeviceGemmStreamK>("StreamK emulating basic data-parallel GEMM", options);
options.avail_sms = -1; // Reset load-balancing width to unspecified SMs (i.e., the number of device SMs)
printf(" Speedup vs Basic-DP: %.3f\n", (basic_dp.avg_runtime_ms / streamk_dp.avg_runtime_ms));
options.split_k_factor++; // Increment splitting factor for next evaluation
}
// Show that StreamK can emulate "Split-K" with a tile-splitting factor
Result basic_splitk = run<DeviceGemmBasic>(
std::string("Basic split-K GEMM with tile-splitting factor ") + std::to_string(options.split_k_factor),
options);
Result streamk_splitk = run<DeviceGemmStreamK>(
std::string("StreamK emulating Split-K GEMM with tile-splitting factor ") + std::to_string(options.split_k_factor),
options);
printf(" Speedup vs Basic-SplitK: %.3f\n", (basic_splitk.avg_runtime_ms / streamk_splitk.avg_runtime_ms));
return 0;
}
| examples/47_ampere_gemm_universal_streamk/ampere_gemm_universal_streamk_broadcast.cu/0 | {
"file_path": "examples/47_ampere_gemm_universal_streamk/ampere_gemm_universal_streamk_broadcast.cu",
"repo_id": "examples",
"token_count": 12478
} | 10 |
/***************************************************************************************************
* Copyright (c) 2024 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include "cutlass/cluster_launch.hpp"
#include "cutlass/arch/barrier.h"
#include "cutlass/pipeline/sm90_pipeline.hpp"
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "cutlass/util/helper_cuda.hpp"
#include "cutlass/arch/mma_sm90.h"
#include "cutlass/device_kernel.h"
using namespace cute;
template <class ElementA,
class ElementB,
class SmemLayoutA, // (M,K,P)
class SmemLayoutB> // (N,K,P)
struct SharedStorage
{
array_aligned<ElementA, cosize_v<SmemLayoutA>> smem_A;
array_aligned<ElementB, cosize_v<SmemLayoutB>> smem_B;
uint64_t tma_barrier[size<2>(SmemLayoutA{})];
uint64_t mma_barrier[size<2>(SmemLayoutA{})];
};
template <class ProblemShape, class CtaTiler,
class TA, class SmemLayoutA, class TmaA,
class TB, class SmemLayoutB, class TmaB,
class TC, class CStride, class TiledMma,
class Alpha, class Beta>
__global__ static
__launch_bounds__(decltype(size(TiledMma{}))::value)
void
gemm_device(ProblemShape shape_MNK, CtaTiler cta_tiler,
TA const* A, CUTLASS_GRID_CONSTANT TmaA const tma_a,
TB const* B, CUTLASS_GRID_CONSTANT TmaB const tma_b,
TC * C, CStride dC, TiledMma mma,
Alpha alpha, Beta beta)
{
// Preconditions
CUTE_STATIC_ASSERT_V(rank(shape_MNK) == Int<3>{}); // (M, N, K)
CUTE_STATIC_ASSERT_V(rank(cta_tiler) == Int<3>{}); // (BLK_M, BLK_N, BLK_K)
static_assert(is_static<SmemLayoutA>::value);
static_assert(is_static<SmemLayoutB>::value);
CUTE_STATIC_ASSERT_V(size<0>(SmemLayoutA{}) == size<0>(cta_tiler)); // BLK_M
CUTE_STATIC_ASSERT_V(size<0>(SmemLayoutB{}) == size<1>(cta_tiler)); // BLK_N
CUTE_STATIC_ASSERT_V(size<1>(SmemLayoutA{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(size<1>(SmemLayoutB{}) == size<2>(cta_tiler)); // BLK_K
CUTE_STATIC_ASSERT_V(congruent(select<0,1>(shape_MNK), dC)); // dC strides for shape MN
//
// Full and Tiled Tensors
//
// Represent the full tensors
auto [M, N, K] = shape_MNK;
Tensor mA = tma_a.get_tma_tensor(make_shape(M,K)); // (M,K) TMA Tensor
Tensor mB = tma_b.get_tma_tensor(make_shape(N,K)); // (N,K) TMA Tensor
Tensor mC = make_tensor(make_gmem_ptr(C), make_shape(M,N), dC); // (M,N)
// Get the appropriate blocks for this thread block
auto cta_coord = make_coord(blockIdx.x, blockIdx.y, _); // (m,n,k)
Tensor gA = local_tile(mA, cta_tiler, cta_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,k)
Tensor gB = local_tile(mB, cta_tiler, cta_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,k)
Tensor gC = local_tile(mC, cta_tiler, cta_coord, Step<_1,_1, X>{}); // (BLK_M,BLK_N)
// Shared memory tensors
extern __shared__ char shared_memory[];
using SharedStorage = SharedStorage<TA, TB, SmemLayoutA, SmemLayoutB>;
SharedStorage& smem = *reinterpret_cast<SharedStorage*>(shared_memory);
Tensor sA = make_tensor(make_smem_ptr(smem.smem_A.data()), SmemLayoutA{}); // (BLK_M,BLK_K,PIPE)
Tensor sB = make_tensor(make_smem_ptr(smem.smem_B.data()), SmemLayoutB{}); // (BLK_N,BLK_K,PIPE)
//
// Partition the copying of A and B tiles
//
// TUTORIAL:
// These are TMA partitionings, which have a dedicated custom partitioner.
// The Int<0>, Layout<_1> arguments indicate that the TMAs are not multicasted.
// Any multicasting must be consistent with the tma_x atoms constructed with make_tma_atom on the host.
// The group_modes<0,2> transforms the (X,Y,Z)-shaped tensors into ((X,Y),Z)-shaped tensors
// with the understanding that the TMA is responsible for everything in mode-0.
// The tma_partition reorders and offsets mode-0 according to the tma_x atom and the multicast info.
//
auto [tAgA, tAsA] = tma_partition(tma_a, Int<0>{}, Layout<_1>{},
group_modes<0,2>(sA), group_modes<0,2>(gA)); // (TMA,k) and (TMA,PIPE)
auto [tBgB, tBsB] = tma_partition(tma_b, Int<0>{}, Layout<_1>{},
group_modes<0,2>(sB), group_modes<0,2>(gB)); // (TMA,k) and (TMA,PIPE)
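// For example, with the 128x128x64 CTA tiles and 3-stage pipeline configured by the host
// launchers below, the grouped mode-0 ("TMA") covers one full 128x64 tile per operand, so
//   tAgA : (TMA,k)    ~ ((128*64), K/64)   gmem source tiles of A
//   tAsA : (TMA,PIPE) ~ ((128*64), 3)      smem destination stages of A
// and analogously for tBgB/tBsB; the exact mode-0 layout is whatever the TMA atom dictates.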
// The TMA is responsible for copying everything in mode-0 of tAsA and tBsB
constexpr int kTmaTransactionBytes = CUTE_STATIC_V(size<0>(tAsA)) * sizeof(TA) +
CUTE_STATIC_V(size<0>(tBsB)) * sizeof(TB);
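// For example, with half_t operands and the 128x128x64 tile below, each stage expects
//   128*64*sizeof(half_t) + 128*64*sizeof(half_t) = 16384 B + 16384 B = 32768 B,
// which is the value the producer barrier is armed with via arrive_and_expect_tx().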
//
// PREFETCH
//
auto K_PIPE_MAX = size<1>(tAsA);
// Total count of tiles
int k_tile_count = size<1>(tAgA);
// Current tile index in gmem to read from
int k_tile = 0;
// Initialize Barriers
int warp_idx = cutlass::canonical_warp_idx_sync();
int lane_predicate = cute::elect_one_sync();
uint64_t* producer_mbar = smem.tma_barrier;
uint64_t* consumer_mbar = smem.mma_barrier;
using ProducerBarType = cutlass::arch::ClusterTransactionBarrier; // TMA
using ConsumerBarType = cutlass::arch::ClusterBarrier; // MMA
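// The producer (TMA) barrier is a transaction barrier armed by the single elected thread,
// hence arrive count 1; the consumer (MMA) barrier is arrived on by all 128 threads of the
// MMA warpgroup, hence arrive count 128.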
CUTE_UNROLL
for (int pipe = 0; pipe < K_PIPE_MAX; ++pipe) {
if ((warp_idx == 0) && lane_predicate) {
ProducerBarType::init(&producer_mbar[pipe], 1);
ConsumerBarType::init(&consumer_mbar[pipe], 128);
}
}
// Ensure barrier init is complete on all CTAs
cluster_sync();
// Start async loads for all pipes
CUTE_UNROLL
for (int pipe = 0; pipe < K_PIPE_MAX; ++pipe)
{
if ((warp_idx == 0) && lane_predicate)
{
// Set expected Tx Bytes after each reset / init
ProducerBarType::arrive_and_expect_tx(&producer_mbar[pipe], kTmaTransactionBytes);
copy(tma_a.with(producer_mbar[pipe]), tAgA(_,k_tile), tAsA(_,pipe));
copy(tma_b.with(producer_mbar[pipe]), tBgB(_,k_tile), tBsB(_,pipe));
}
--k_tile_count;
++k_tile;
}
//
// Define A/B partitioning and C accumulators
//
// TUTORIAL:
// The tCrA and tCrB are actually Tensors of MMA Descriptors constructed as views of SMEM.
// The MMA Descriptor generation is automatic via inspection and validation of the SMEM Layouts.
// Because the MMA reads directly from SMEM and the fragments are descriptors rather than registers,
// there is no need for copy(tCsA, tCrA) in the mainloop.
//
ThrMMA thr_mma = mma.get_thread_slice(threadIdx.x);
Tensor tCsA = thr_mma.partition_A(sA); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCsB = thr_mma.partition_B(sB); // (MMA,MMA_N,MMA_K,PIPE)
Tensor tCgC = thr_mma.partition_C(gC); // (MMA,MMA_M,MMA_N)
// Allocate accumulators and clear them
Tensor tCrC = thr_mma.make_fragment_C(tCgC); // (MMA,MMA_M,MMA_N)
clear(tCrC);
// Allocate "fragments"
Tensor tCrA = thr_mma.make_fragment_A(tCsA); // (MMA,MMA_M,MMA_K,PIPE)
Tensor tCrB = thr_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K,PIPE)
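// Note that tCrA/tCrB hold no register data: each element is a GMMA shared-memory descriptor
// (roughly, the SMEM address plus leading-dimension and swizzle information of the tile it
// points at), which the warpgroup MMA consumes directly, as the TUTORIAL note above explains.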
//
// PIPELINED MAIN LOOP
//
// TUTORIAL:
// Rather than interleaving the stages and instructions like in SM70 and SM80,
// the SM90 mainloops rely on explicit producer-consumer synchronization
// on the purely async instructions TMA and MMA.
// More advanced pipeline and warp-specialization strategies are available in CUTLASS mainloops.
//
// A PipelineState is a circular pipe index [.index()] and a pipe phase [.phase()];
// the phase flips each time the index wraps around after K_PIPE_MAX stages.
auto write_state = cutlass::PipelineState<K_PIPE_MAX>(); // TMA writes
auto read_state = cutlass::PipelineState<K_PIPE_MAX>(); // MMA reads
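// For K_PIPE_MAX == 3 the (index, phase) sequence advances as
//   (0,0) (1,0) (2,0) (0,1) (1,1) (2,1) (0,0) ...
// The phase bit is what lets a barrier wait distinguish a stage filled on the current pass
// from data left over from the previous pass through the pipe.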
CUTE_NO_UNROLL
while (k_tile_count > -K_PIPE_MAX)
{
// Wait for Producer to complete
int read_pipe = read_state.index();
ProducerBarType::wait(&producer_mbar[read_pipe], read_state.phase());
// MMAs to cover 1 K_TILE
warpgroup_arrive();
gemm(mma, tCrA(_,_,_,read_pipe), tCrB(_,_,_,read_pipe), tCrC); // (V,M) x (V,N) => (V,M,N)
warpgroup_commit_batch();
// Wait for all MMAs in a K_TILE to complete
warpgroup_wait<0>();
// Notify that consumption is done
ConsumerBarType::arrive(&consumer_mbar[read_pipe]);
++read_state;
if ((warp_idx == 0) && lane_predicate)
{
int pipe = write_state.index();
// Wait for Consumer to complete consumption
ConsumerBarType::wait(&consumer_mbar[pipe], write_state.phase());
// Set expected Tx Bytes after each reset / init
ProducerBarType::arrive_and_expect_tx(&producer_mbar[pipe], kTmaTransactionBytes);
copy(tma_a.with(producer_mbar[pipe]), tAgA(_,k_tile), tAsA(_,pipe));
copy(tma_b.with(producer_mbar[pipe]), tBgB(_,k_tile), tBsB(_,pipe));
++write_state;
}
--k_tile_count;
++k_tile;
}
//
// Epilogue (unpredicated)
//
axpby(alpha, tCrC, beta, tCgC);
}
// Setup params for an NT GEMM
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm_nt(int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
// Define shapes (dynamic)
auto M = int(m);
auto N = int(n);
auto K = int(k);
auto prob_shape = make_shape(M, N, K); // (M, N, K)
// Define NT strides (mixed)
auto dA = make_stride(Int<1>{}, ldA); // (dM, dK)
auto dB = make_stride(Int<1>{}, ldB); // (dN, dK)
auto dC = make_stride(Int<1>{}, ldC); // (dM, dN)
// Define CTA tile sizes (static)
auto bM = Int<128>{};
auto bN = Int<128>{};
auto bK = Int< 64>{};
auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K)
auto bP = Int< 3>{}; // Pipeline
// Define the smem layouts (static)
auto sA = tile_to_shape(GMMA::Layout_MN_SW128_Atom<TA>{}, make_shape(bM,bK,bP));
auto sB = tile_to_shape(GMMA::Layout_MN_SW128_Atom<TB>{}, make_shape(bN,bK,bP));
// Define the MMA
TiledMMA tiled_mma = make_tiled_mma(SM90_64x64x16_F16F16F16_SS<GMMA::Major::MN,GMMA::Major::MN>{});
// Define the TMAs
// Create Global memory tensors for TMA inspection
Tensor mA = make_tensor(A, make_shape(M,K), dA);
Tensor mB = make_tensor(B, make_shape(N,K), dB);
// Create TMA Atoms with the desired copy operation on the source and destination
Copy_Atom tmaA = make_tma_atom(SM90_TMA_LOAD{}, mA, sA(_,_,0), make_shape(bM,bK));
Copy_Atom tmaB = make_tma_atom(SM90_TMA_LOAD{}, mB, sB(_,_,0), make_shape(bN,bK));
//
// Setup and Launch
//
// Launch parameter setup
int smem_size = int(sizeof(SharedStorage<TA, TB, decltype(sA), decltype(sB)>));
dim3 dimBlock(size(tiled_mma));
dim3 dimCluster(2, 1, 1);
dim3 dimGrid(round_up(size(ceil_div(m, bM)), dimCluster.x),
round_up(size(ceil_div(n, bN)), dimCluster.y));
cutlass::ClusterLaunchParams params = {dimGrid, dimBlock, dimCluster, smem_size};
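// For instance, the default m = 512, n = 256 with 128x128 CTA tiles gives a 4x2 grid of CTAs,
// which is already a multiple of the 2x1 cluster shape; round_up() only pads when it is not.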
void const* kernel_ptr = reinterpret_cast<void const*>(
&gemm_device<decltype(prob_shape), decltype(cta_tiler),
TA, decltype(sA), decltype(tmaA),
TB, decltype(sB), decltype(tmaB),
TC, decltype(dC), decltype(tiled_mma),
decltype(alpha), decltype(beta)>);
CUTE_CHECK_ERROR(cudaFuncSetAttribute(
kernel_ptr,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size));
// Kernel Launch
cutlass::Status status = cutlass::launch_kernel_on_cluster(params, kernel_ptr,
prob_shape, cta_tiler,
A, tmaA,
B, tmaB,
C, dC, tiled_mma,
alpha, beta);
CUTE_CHECK_LAST();
if (status != cutlass::Status::kSuccess) {
std::cerr << "Error: Failed at kernel Launch" << std::endl;
}
}
// Setup params for a TN GEMM
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm_tn(int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
// Define shapes (dynamic)
auto M = int(m);
auto N = int(n);
auto K = int(k);
auto prob_shape = make_shape(M, N, K); // (M, N, K)
// Define TN strides (mixed)
auto dA = make_stride(ldA, Int<1>{}); // (dM, dK)
auto dB = make_stride(ldB, Int<1>{}); // (dN, dK)
auto dC = make_stride(Int<1>{}, ldC); // (dM, dN)
// Define CTA tile sizes (static)
auto bM = Int<128>{};
auto bN = Int<128>{};
auto bK = Int< 64>{};
auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K)
auto bP = Int<3>{}; // Pipeline
// Define the smem layouts (static)
auto sA = tile_to_shape(GMMA::Layout_K_SW128_Atom<TA>{}, make_shape(bM,bK,bP));
auto sB = tile_to_shape(GMMA::Layout_K_SW128_Atom<TB>{}, make_shape(bN,bK,bP));
// Define the MMA
TiledMMA tiled_mma = make_tiled_mma(SM90_64x64x16_F16F16F16_SS<GMMA::Major::K,GMMA::Major::K>{});
// Define the TMAs
// Create Global memory tensors for TMA inspection
Tensor mA = make_tensor(A, make_shape(M,K), dA);
Tensor mB = make_tensor(B, make_shape(N,K), dB);
// Create TMA Atoms with the desired copy operation on the source and destination
Copy_Atom tmaA = make_tma_atom(SM90_TMA_LOAD{}, mA, sA(_,_,0), make_shape(bM,bK));
Copy_Atom tmaB = make_tma_atom(SM90_TMA_LOAD{}, mB, sB(_,_,0), make_shape(bN,bK));
//
// Setup and Launch
//
// Launch parameter setup
int smem_size = int(sizeof(SharedStorage<TA, TB, decltype(sA), decltype(sB)>));
dim3 dimBlock(size(tiled_mma));
dim3 dimCluster(2, 1, 1);
dim3 dimGrid(round_up(size(ceil_div(m, bM)), dimCluster.x),
round_up(size(ceil_div(n, bN)), dimCluster.y));
cutlass::ClusterLaunchParams params = {dimGrid, dimBlock, dimCluster, smem_size};
void const* kernel_ptr = reinterpret_cast<void const*>(
&gemm_device<decltype(prob_shape), decltype(cta_tiler),
TA, decltype(sA), decltype(tmaA),
TB, decltype(sB), decltype(tmaB),
TC, decltype(dC), decltype(tiled_mma),
decltype(alpha), decltype(beta)>);
CUTE_CHECK_ERROR(cudaFuncSetAttribute(
kernel_ptr,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size));
// Kernel Launch
cutlass::Status status = cutlass::launch_kernel_on_cluster(params, kernel_ptr,
prob_shape, cta_tiler,
A, tmaA,
B, tmaB,
C, dC, tiled_mma,
alpha, beta);
CUTE_CHECK_LAST();
if (status != cutlass::Status::kSuccess) {
std::cerr << "Error: Failed at kernel Launch" << std::endl;
}
}
template <class TA, class TB, class TC,
class Alpha, class Beta>
void
gemm(char transA, char transB, int m, int n, int k,
Alpha alpha,
TA const* A, int ldA,
TB const* B, int ldB,
Beta beta,
TC * C, int ldC,
cudaStream_t stream = 0)
{
if (transA == 'N' && transB == 'T') {
return gemm_nt(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
} else
if (transA == 'T' && transB == 'N') {
return gemm_tn(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream);
}
assert(false && "Not implemented");
}
int main(int argc, char** argv)
{
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (props.major != 9) {
std::cout << "This example requires NVIDIA's Hopper Architecture GPU with compute capability 90a\n" << std::endl;
return 0;
}
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
int m = 512;
if (argc >= 2)
sscanf(argv[1], "%d", &m);
int n = 256;
if (argc >= 3)
sscanf(argv[2], "%d", &n);
int k = 1024;
if (argc >= 4)
sscanf(argv[3], "%d", &k);
char transA = 'N';
if (argc >= 5)
sscanf(argv[4], "%c", &transA);
char transB = 'T';
if (argc >= 6)
sscanf(argv[5], "%c", &transB);
using TA = cute::half_t;
using TB = cute::half_t;
using TC = cute::half_t;
using TI = cute::half_t;
TI alpha = TI(1.0f);
TI beta = TI(0.0f);
thrust::host_vector<TA> h_A(m*k);
thrust::host_vector<TB> h_B(n*k);
thrust::host_vector<TC> h_C(m*n);
// Initialize the tensors
for (int j = 0; j < m*k; ++j) h_A[j] = TA(int((rand() % 2) ? 1 : -1));
for (int j = 0; j < n*k; ++j) h_B[j] = TB(int((rand() % 2) ? 1 : -1));
for (int j = 0; j < m*n; ++j) h_C[j] = TC(0);
thrust::device_vector<TA> d_A = h_A;
thrust::device_vector<TB> d_B = h_B;
thrust::device_vector<TC> d_C = h_C;
double gflops = (2.0*m*n*k) * 1e-9;
const int timing_iterations = 100;
GPU_Clock timer;
int ldA = 0, ldB = 0, ldC = m;
if (transA == 'N') {
ldA = m;
} else if (transA == 'T') {
ldA = k;
} else {
assert(false);
}
if (transB == 'N') {
ldB = k;
} else if (transB == 'T') {
ldB = n;
} else {
assert(false);
}
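// With the default transA = 'N', transB = 'T' this selects ldA = m and ldB = n, i.e. both
// A (M x K) and B (N x K) keep their first mode contiguous, matching the NT strides in gemm_nt().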
// Run once
d_C = h_C;
gemm(transA, transB, m, n, k,
alpha,
d_A.data().get(), ldA,
d_B.data().get(), ldB,
beta,
d_C.data().get(), ldC);
CUTE_CHECK_LAST();
thrust::host_vector<TC> cute_result = d_C;
// Timing iterations
timer.start();
for (int i = 0; i < timing_iterations; ++i) {
gemm(transA, transB, m, n, k,
alpha,
d_A.data().get(), ldA,
d_B.data().get(), ldB,
beta,
d_C.data().get(), ldC);
}
double cute_time = timer.seconds() / timing_iterations;
CUTE_CHECK_LAST();
printf("CUTE_GEMM: [%6.1f]GFlop/s (%6.4f)ms\n", gflops / cute_time, cute_time*1000);
#else
std::cout << "CUTLASS_ARCH_MMA_SM90_SUPPORTED must be enabled, but it is not. Test is waived \n" << std::endl;
#endif
return 0;
}
| examples/cute/tutorial/wgmma_sm90.cu/0 | {
"file_path": "examples/cute/tutorial/wgmma_sm90.cu",
"repo_id": "examples",
"token_count": 9640
} | 11 |
/***************************************************************************************************
* Copyright (c) 2024 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/tensor_impl.hpp>
#include <cute/atom/copy_atom.hpp>
namespace cute
{
//
// Prefetch global tensors into L2
//
template <uint32_t NumThreads, uint32_t FetchBytes = 64,
class GEngine, class GLayout>
CUTE_HOST_DEVICE
void
cooperative_prefetch(uint32_t const& tid,
Tensor<GEngine, GLayout> const& src)
{
static_assert(is_gmem<GEngine>::value, "Expected global tensor for prefetch");
constexpr int V = decltype(max_common_vector(src, src))::value;
if constexpr (V > 1) {
// L2 sector is 32B, default fetch granularity is 64B
using VecType = conditional_t<(V * sizeof_bits_v<typename GEngine::value_type>) < (FetchBytes * 8),
ArrayEngine<typename GEngine::value_type, V>,
uint8_t[FetchBytes] >;
Tensor src_v = recast<VecType const>(src);
CUTE_UNROLL
for (int i = tid; i < size(src_v); i += NumThreads) {
prefetch(raw_pointer_cast(&src_v(i)));
}
} else {
CUTE_UNROLL
for (int i = tid; i < size(src); i += NumThreads) {
prefetch(raw_pointer_cast(&src(i)));
}
}
}
template <class GEngine, class GLayout>
CUTE_HOST_DEVICE
void
prefetch(Tensor<GEngine, GLayout> const& src)
{
return cooperative_prefetch<1>(0, src);
}
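// A minimal usage sketch (the kernel and names below are illustrative, not part of this header):
//
//   template <int Threads, class Engine, class Layout>
//   __device__ void warm_l2(Tensor<Engine, Layout> const& gmem_tile)
//   {
//     cooperative_prefetch<Threads>(threadIdx.x, gmem_tile);  // all Threads cooperate
//   }
//
// Single-threaded callers can simply use prefetch(gmem_tile) as defined above.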
// Prefetch with copy atom
namespace detail {
template <class CopyOp, class = void>
constexpr bool has_prefetch = false;
template <class CopyOp>
constexpr bool has_prefetch<CopyOp, void_t<typename CopyOp::PREFETCH>> = true;
} // end namespace detail
template <class CopyOp, class... CT_Args, class... CA_Args,
class GEngine, class GLayout>
CUTE_HOST_DEVICE
void
prefetch(Copy_Atom<Copy_Traits<CopyOp, CT_Args...>, CA_Args...> const& atom,
Tensor<GEngine, GLayout> const& src)
{
if constexpr (detail::has_prefetch<CopyOp>) {
using Prefetch_Traits = Copy_Traits<typename CopyOp::PREFETCH, CT_Args...>;
using Prefetch_Atom = Copy_Atom<Prefetch_Traits, CA_Args...>;
Prefetch_Atom prefetch_atom{atom};
auto& dst = const_cast<Tensor<GEngine, GLayout>&>(src); // dst is ignored for prefetch atoms
return copy(prefetch_atom, src, dst);
} else {
return prefetch(src);
}
}
#if defined(CUTE_COPY_ATOM_TMA_SM90_ENABLED)
template <class... CT_Args,
class SrcEngine, class SrcLayout>
CUTE_HOST_DEVICE
void
prefetch(Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...> const& atom,
Tensor<SrcEngine, SrcLayout> const& src)
{
using SrcType = typename SrcEngine::value_type;
static_assert(is_gmem<SrcEngine>::value, "Expected global tensor for L2 prefetch");
auto tiler = max_common_layout(src, src);
constexpr int vec_elem = decltype(size(tiler))::value;
constexpr int vec_bits = vec_elem * sizeof_bits_v<SrcType>;
static_assert(vec_bits >= 128, "Expected at least 128-bits for BLKCP");
// Construct a new concrete Atom of the vector size
auto bulk_atom = Copy_Atom<Copy_Traits<SM90_BULK_COPY_G2S, Int<vec_bits>>, SrcType>{};
return prefetch(bulk_atom, logical_divide(src, tiler));
}
// Backwards-compat. Throw out any extra Copy_Atom args.
template <class... CT_Args, class... CA_Args,
class SrcEngine, class SrcLayout>
CUTE_HOST_DEVICE
void
prefetch(Copy_Atom<Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...>, CA_Args...> const& atom,
Tensor<SrcEngine, SrcLayout> const& src)
{
return prefetch(static_cast<Copy_Traits<SM90_BULK_COPY_AUTO, CT_Args...> const&>(atom), src);
}
#endif // #if defined(CUTE_COPY_ATOM_TMA_SM90_ENABLED)
} // end namespace cute
| include/cute/algorithm/prefetch.hpp/0 | {
"file_path": "include/cute/algorithm/prefetch.hpp",
"repo_id": "include",
"token_count": 2095
} | 12 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/mma.hpp>
// Config
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900)
# define CUTE_ARCH_MMA_SM90_ENABLED
# define CUTE_ARCH_MMA_F64_SM90_ENABLED
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cute {
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x4 TN
struct SM90_16x8x4_F64F64F64F64_TN
{
using DRegisters = double[4];
using ARegisters = double[2];
using BRegisters = double[1];
using CRegisters = double[4];
CUTE_HOST_DEVICE static void
fma(double & d0, double & d1, double & d2, double & d3,
double const& a0, double const& a1,
double const& b0,
double const& c0, double const& c1, double const& c2, double const& c3)
{
#if defined(CUTE_ARCH_MMA_F64_SM90_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k4.row.col.f64.f64.f64.f64"
"{%0, %1, %2, %3},"
"{%4, %5},"
"{%6},"
"{%7, %8, %9, %10};\n"
: "=d"(d0), "=d"(d1), "=d"(d2), "=d"(d3)
: "d"(a0), "d"(a1),
"d"(b0),
"d"(c0), "d"(c1), "d"(c2), "d"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_16x8x4_F64F64F64F64_TN without CUTE_ARCH_MMA_SM90_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x8 TN
struct SM90_16x8x8_F64F64F64F64_TN
{
using DRegisters = double[4];
using ARegisters = double[4];
using BRegisters = double[2];
using CRegisters = double[4];
CUTE_HOST_DEVICE static void
fma(double & d0, double & d1, double & d2, double & d3,
double const& a0, double const& a1, double const& a2, double const& a3,
double const& b0, double const& b1,
double const& c0, double const& c1, double const& c2, double const& c3)
{
#if defined(CUTE_ARCH_MMA_F64_SM90_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f64.f64.f64.f64"
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7},"
"{%8, %9},"
"{%10, %11, %12, %13};\n"
: "=d"(d0), "=d"(d1), "=d"(d2), "=d"(d3)
: "d"(a0), "d"(a1), "d"(a2), "d"(a3),
"d"(b0), "d"(b1),
"d"(c0), "d"(c1), "d"(c2), "d"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_16x8x8_F64F64F64F64_TN without CUTE_ARCH_MMA_SM90_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x16 TN
struct SM90_16x8x16_F64F64F64F64_TN
{
using DRegisters = double[4];
using ARegisters = double[8];
using BRegisters = double[4];
using CRegisters = double[4];
CUTE_HOST_DEVICE static void
fma(double & d0, double & d1, double & d2, double & d3,
double const& a0, double const& a1, double const& a2, double const& a3,
double const& a4, double const& a5, double const& a6, double const& a7,
double const& b0, double const& b1, double const& b2, double const& b3,
double const& c0, double const& c1, double const& c2, double const& c3)
{
#if defined(CUTE_ARCH_MMA_F64_SM90_ENABLED)
asm volatile(
"mma.sync.aligned.m16n8k16.row.col.f64.f64.f64.f64"
"{%0, %1, %2, %3},"
"{%4, %5, %6, %7, %8, %9, %10, %11},"
"{%12, %13, %14, %15},"
"{%16, %17, %18, %19};\n"
: "=d"(d0), "=d"(d1), "=d"(d2), "=d"(d3)
: "d"(a0), "d"(a1), "d"(a2), "d"(a3),
"d"(a4), "d"(a5), "d"(a6), "d"(a7),
"d"(b0), "d"(b1), "d"(b2), "d"(b3),
"d"(c0), "d"(c1), "d"(c2), "d"(c3));
#else
CUTE_INVALID_CONTROL_PATH("Attempting to use SM90_16x8x16_F64F64F64F64_TN without CUTE_ARCH_MMA_SM90_ENABLED");
#endif
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x4 TN
struct SM90_16x8x4_C64C64C64C64_TN
{
using DRegisters = complex<double>[4];
using ARegisters = complex<double>[2];
using BRegisters = complex<double>[1];
using CRegisters = complex<double>[4];
CUTE_HOST_DEVICE static void
fma(complex<double> & d0, complex<double> & d1,
complex<double> & d2, complex<double> & d3,
complex<double> const& a0, complex<double> const& a1,
complex<double> const& b0,
complex<double> const& c0, complex<double> const& c1,
complex<double> const& c2, complex<double> const& c3)
{
// Because thrust::complex does not provide a mutable ref
double& rd0 = reinterpret_cast<double(&)[2]>(d0)[0];
double& id0 = reinterpret_cast<double(&)[2]>(d0)[1];
double& rd1 = reinterpret_cast<double(&)[2]>(d1)[0];
double& id1 = reinterpret_cast<double(&)[2]>(d1)[1];
double& rd2 = reinterpret_cast<double(&)[2]>(d2)[0];
double& id2 = reinterpret_cast<double(&)[2]>(d2)[1];
double& rd3 = reinterpret_cast<double(&)[2]>(d3)[0];
double& id3 = reinterpret_cast<double(&)[2]>(d3)[1];
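// The complex multiply-accumulate expands into four real MMAs:
//   d.real = a.real*b.real - a.imag*b.imag + c.real
//   d.imag = a.imag*b.real + a.real*b.imag + c.imag
// computed below as two fma steps per component, accumulating into d.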
// d.real() = a.real() * b.real() + c.real();
SM90_16x8x4_F64F64F64F64_TN::fma(
rd0, rd1, rd2, rd3,
a0.real(), a1.real(),
b0.real(),
c0.real(), c1.real(), c2.real(), c3.real());
// d.imag() = a.imag() * b.real() + c.imag();
SM90_16x8x4_F64F64F64F64_TN::fma(
id0, id1, id2, id3,
a0.imag(), a1.imag(),
b0.real(),
c0.imag(), c1.imag(), c2.imag(), c3.imag());
// d.real() = -a.imag() * b.imag() + d.real();
SM90_16x8x4_F64F64F64F64_TN::fma(
rd0, rd1, rd2, rd3,
-a0.imag(), -a1.imag(),
b0.imag(),
d0.real(), d1.real(), d2.real(), d3.real());
// d.imag() = a.real() * b.imag() + d.imag();
SM90_16x8x4_F64F64F64F64_TN::fma(
id0, id1, id2, id3,
a0.real(), a1.real(),
b0.imag(),
d0.imag(), d1.imag(), d2.imag(), d3.imag());
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x8 TN
struct SM90_16x8x8_C64C64C64C64_TN
{
using DRegisters = complex<double>[4];
using ARegisters = complex<double>[4];
using BRegisters = complex<double>[2];
using CRegisters = complex<double>[4];
CUTE_HOST_DEVICE static void
fma(complex<double> & d0, complex<double> & d1,
complex<double> & d2, complex<double> & d3,
complex<double> const& a0, complex<double> const& a1,
complex<double> const& a2, complex<double> const& a3,
complex<double> const& b0, complex<double> const& b1,
complex<double> const& c0, complex<double> const& c1,
complex<double> const& c2, complex<double> const& c3)
{
// Because thrust::complex does not provide a mutable ref
double& rd0 = reinterpret_cast<double(&)[2]>(d0)[0];
double& id0 = reinterpret_cast<double(&)[2]>(d0)[1];
double& rd1 = reinterpret_cast<double(&)[2]>(d1)[0];
double& id1 = reinterpret_cast<double(&)[2]>(d1)[1];
double& rd2 = reinterpret_cast<double(&)[2]>(d2)[0];
double& id2 = reinterpret_cast<double(&)[2]>(d2)[1];
double& rd3 = reinterpret_cast<double(&)[2]>(d3)[0];
double& id3 = reinterpret_cast<double(&)[2]>(d3)[1];
// d.real() = a.real() * b.real() + c.real();
SM90_16x8x8_F64F64F64F64_TN::fma(
rd0, rd1, rd2, rd3,
a0.real(), a1.real(), a2.real(), a3.real(),
b0.real(), b1.real(),
c0.real(), c1.real(), c2.real(), c3.real());
// d.imag() = a.imag() * b.real() + c.imag();
SM90_16x8x8_F64F64F64F64_TN::fma(
id0, id1, id2, id3,
a0.imag(), a1.imag(), a2.imag(), a3.imag(),
b0.real(), b1.real(),
c0.imag(), c1.imag(), c2.imag(), c3.imag());
// d.real() = -a.imag() * b.imag() + d.real();
SM90_16x8x8_F64F64F64F64_TN::fma(
rd0, rd1, rd2, rd3,
-a0.imag(), -a1.imag(), -a2.imag(), -a3.imag(),
b0.imag(), b1.imag(),
d0.real(), d1.real(), d2.real(), d3.real());
// d.imag() = a.real() * b.imag() + d.imag();
SM90_16x8x8_F64F64F64F64_TN::fma(
id0, id1, id2, id3,
a0.real(), a1.real(), a2.real(), a3.real(),
b0.imag(), b1.imag(),
d0.imag(), d1.imag(), d2.imag(), d3.imag());
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// MMA 16x8x16 TN
struct SM90_16x8x16_C64C64C64C64_TN
{
using DRegisters = complex<double>[4];
using ARegisters = complex<double>[8];
using BRegisters = complex<double>[4];
using CRegisters = complex<double>[4];
CUTE_HOST_DEVICE static void
fma(complex<double> & d0, complex<double> & d1,
complex<double> & d2, complex<double> & d3,
complex<double> const& a0, complex<double> const& a1,
complex<double> const& a2, complex<double> const& a3,
complex<double> const& a4, complex<double> const& a5,
complex<double> const& a6, complex<double> const& a7,
complex<double> const& b0, complex<double> const& b1,
complex<double> const& b2, complex<double> const& b3,
complex<double> const& c0, complex<double> const& c1,
complex<double> const& c2, complex<double> const& c3)
{
// Because thrust::complex does not provide a mutable ref
double& rd0 = reinterpret_cast<double(&)[2]>(d0)[0];
double& id0 = reinterpret_cast<double(&)[2]>(d0)[1];
double& rd1 = reinterpret_cast<double(&)[2]>(d1)[0];
double& id1 = reinterpret_cast<double(&)[2]>(d1)[1];
double& rd2 = reinterpret_cast<double(&)[2]>(d2)[0];
double& id2 = reinterpret_cast<double(&)[2]>(d2)[1];
double& rd3 = reinterpret_cast<double(&)[2]>(d3)[0];
double& id3 = reinterpret_cast<double(&)[2]>(d3)[1];
// d.real() = a.real() * b.real() + c.real();
SM90_16x8x16_F64F64F64F64_TN::fma(
rd0, rd1, rd2, rd3,
a0.real(), a1.real(), a2.real(), a3.real(),
a4.real(), a5.real(), a6.real(), a7.real(),
b0.real(), b1.real(), b2.real(), b3.real(),
c0.real(), c1.real(), c2.real(), c3.real());
// d.imag() = a.imag() * b.real() + c.imag();
SM90_16x8x16_F64F64F64F64_TN::fma(
id0, id1, id2, id3,
a0.imag(), a1.imag(), a2.imag(), a3.imag(),
a4.imag(), a5.imag(), a6.imag(), a7.imag(),
b0.real(), b1.real(), b2.real(), b3.real(),
c0.imag(), c1.imag(), c2.imag(), c3.imag());
// d.real() = -a.imag() * b.imag() + d.real();
SM90_16x8x16_F64F64F64F64_TN::fma(
rd0, rd1, rd2, rd3,
-a0.imag(), -a1.imag(), -a2.imag(), -a3.imag(),
-a4.imag(), -a5.imag(), -a6.imag(), -a7.imag(),
b0.imag(), b1.imag(), b2.imag(), b3.imag(),
d0.real(), d1.real(), d2.real(), d3.real());
// d.imag() = a.real() * b.imag() + d.imag();
SM90_16x8x16_F64F64F64F64_TN::fma(
id0, id1, id2, id3,
a0.real(), a1.real(), a2.real(), a3.real(),
a4.real(), a5.real(), a6.real(), a7.real(),
b0.imag(), b1.imag(), b2.imag(), b3.imag(),
d0.imag(), d1.imag(), d2.imag(), d3.imag());
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cute
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <cute/arch/mma_sm90_desc.hpp>
#include <cute/arch/mma_sm90_gmma.hpp>
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cute {
namespace GMMA {
template <
class ElementA,
class ElementB,
class ElementC,
class TileShape_MNK,
GMMA::Major MajorA = GMMA::Major::K,
GMMA::Major MajorB = GMMA::Major::K,
auto... Args // e.g. GMMA::ScaleOut::One, [GMMA::ScaleIn::One, GMMA::ScaleIn::One]
// But most commonly leave empty for defaults
>
CUTE_HOST_DEVICE constexpr
auto
ss_op_selector()
{
static_assert(is_static<TileShape_MNK>::value, "TileShape_MNK must be static.");
static_assert(rank(TileShape_MNK{}) == 3, "TileShape_MNK must be rank 3.");
static_assert(size<0>(TileShape_MNK{}) % 64 == 0, "Tile_M must be a multiple of 64.");
auto Tile_N = size<1>(TileShape_MNK{});
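// Typical use (illustrative): pick the widest SS GMMA op whose instruction N divides the CTA tile, e.g.
//   using Op = decltype(GMMA::ss_op_selector<half_t, half_t, float, Shape<_128,_128,_64>>());
// and wrap it in a TiledMMA via make_tiled_mma(Op{}).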
// F16 accumulator
if constexpr (is_same_v<ElementC, half_t>) {
// Input A: half_t ; Input B: half_t
if constexpr (is_same_v<ElementA, half_t> && is_same_v<ElementB, half_t>) {
static_assert(size<2>(TileShape_MNK{}) % 16 == 0, "Tile_K must be a multiple of 16.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x16_F16F16F16_SS<MajorA, MajorB, Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e4m3_t ; Input B: float_e4m3_t
else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e4m3_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F16E4M3E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F16E4M3E4M3_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F16E4M3E4M3_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F16E4M3E4M3_SS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e4m3_t ; Input B: float_e5m2_t
else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e5m2_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F16E4M3E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F16E4M3E5M2_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F16E4M3E5M2_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F16E4M3E5M2_SS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e5m2_t ; Input B: float_e4m3_t
else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e4m3_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F16E5M2E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F16E5M2E4M3_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F16E5M2E4M3_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F16E5M2E4M3_SS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e5m2_t ; Input B: float_e5m2_t
else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e5m2_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F16E5M2E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F16E5M2E5M2_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F16E5M2E5M2_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F16E5M2E5M2_SS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
else {
static_assert(sizeof(ElementA) == 0, "No eligible GMMA operator for the requested configuration.");
}
}
// F32 accumulator
else if constexpr (is_same_v<ElementC, float>) {
// Input A: half_t ; Input B: half_t
if constexpr (is_same_v<ElementA, half_t> && is_same_v<ElementB, half_t>) {
static_assert(size<2>(TileShape_MNK{}) % 16 == 0, "Tile_K must be a multiple of 16.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x16_F32F16F16_SS<MajorA, MajorB, Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: bfloat16_t ; Input B: bfloat16_t
else if constexpr (is_same_v<ElementA, bfloat16_t> && is_same_v<ElementB, bfloat16_t>) {
static_assert(size<2>(TileShape_MNK{}) % 16 == 0, "Tile_K must be a multiple of 16.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x16_F32BF16BF16_SS<MajorA, MajorB, Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: tfloat32_t ; Input B: tfloat32_t
else if constexpr (is_same_v<ElementA, tfloat32_t> && is_same_v<ElementB, tfloat32_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 8 == 0, "Tile_K must be a multiple of 8.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x8_F32TF32TF32_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x8_F32TF32TF32_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x8_F32TF32TF32_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x8_F32TF32TF32_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x8_F32TF32TF32_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x8_F32TF32TF32_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x8_F32TF32TF32_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x8_F32TF32TF32_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x8_F32TF32TF32_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x8_F32TF32TF32_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x8_F32TF32TF32_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x8_F32TF32TF32_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x8_F32TF32TF32_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x8_F32TF32TF32_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x8_F32TF32TF32_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x8_F32TF32TF32_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x8_F32TF32TF32_SS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e4m3_t ; Input B: float_e4m3_t
else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e4m3_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F32E4M3E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F32E4M3E4M3_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F32E4M3E4M3_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F32E4M3E4M3_SS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e4m3_t ; Input B: float_e5m2_t
else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e5m2_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F32E4M3E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F32E4M3E5M2_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F32E4M3E5M2_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F32E4M3E5M2_SS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e5m2_t ; Input B: float_e4m3_t
else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e4m3_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F32E5M2E4M3_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F32E5M2E4M3_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F32E5M2E4M3_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F32E5M2E4M3_SS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e5m2_t ; Input B: float_e5m2_t
else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e5m2_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F32E5M2E5M2_SS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F32E5M2E5M2_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F32E5M2E5M2_SS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F32E5M2E5M2_SS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
else {
      static_assert(sizeof(ElementA) == 0, "No eligible GMMA operator for the requested configuration.");
}
}
// S32 accumulator
else if constexpr (is_same_v<ElementC, int32_t>) {
// Input A: int8_t ; Input B: int8_t
if constexpr (is_same_v<ElementA, int8_t> && is_same_v<ElementB, int8_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_S32S8S8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_S32S8S8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_S32S8S8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_S32S8S8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_S32S8S8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_S32S8S8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_S32S8S8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_S32S8S8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_S32S8S8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_S32S8S8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_S32S8S8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_S32S8S8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_S32S8S8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_S32S8S8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_S32S8S8_SS_TN{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_S32S8S8_SS_TN{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_S32S8S8_SS_TN{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: int8_t ; Input B: uint8_t
else if constexpr (is_same_v<ElementA, int8_t> && is_same_v<ElementB, uint8_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_S32S8U8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_S32S8U8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_S32S8U8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_S32S8U8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_S32S8U8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_S32S8U8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_S32S8U8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_S32S8U8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_S32S8U8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_S32S8U8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_S32S8U8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_S32S8U8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_S32S8U8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_S32S8U8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_S32S8U8_SS_TN{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_S32S8U8_SS_TN{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_S32S8U8_SS_TN{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: uint8_t ; Input B: int8_t
else if constexpr (is_same_v<ElementA, uint8_t> && is_same_v<ElementB, int8_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_S32U8S8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_S32U8S8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_S32U8S8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_S32U8S8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_S32U8S8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_S32U8S8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_S32U8S8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_S32U8S8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_S32U8S8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_S32U8S8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_S32U8S8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_S32U8S8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_S32U8S8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_S32U8S8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_S32U8S8_SS_TN{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_S32U8S8_SS_TN{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_S32U8S8_SS_TN{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: uint8_t ; Input B: uint8_t
else if constexpr (is_same_v<ElementA, uint8_t> && is_same_v<ElementB, uint8_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_S32U8U8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_S32U8U8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_S32U8U8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_S32U8U8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_S32U8U8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_S32U8U8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_S32U8U8_SS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_S32U8U8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_S32U8U8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_S32U8U8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_S32U8U8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_S32U8U8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_S32U8U8_SS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_S32U8U8_SS_TN{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_S32U8U8_SS_TN{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_S32U8U8_SS_TN{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_S32U8U8_SS_TN{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
else {
      static_assert(sizeof(ElementA) == 0, "No eligible GMMA operator for the requested configuration.");
}
}
// Unknown accumulator type
else {
static_assert(sizeof(ElementC) == 0, "Unknown ElementC accumulator type.");
}
}
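
// Usage sketch (illustrative only, not part of the original header): ss_op_selector picks the
// largest SMEM x SMEM GMMA atom whose N extent divides Tile_N. For example, assuming an
// FP16-input, FP32-accumulator mainloop with a 128x256x64 CTA tile:
//
//   using TileShape_MNK = Shape<_128, _256, _64>;
//   auto op        = GMMA::ss_op_selector<half_t, half_t, float, TileShape_MNK>();
//   auto tiled_mma = make_tiled_mma(op);   // compose the selected atom into a TiledMMA
//
// The tile shape and element types above are assumptions chosen for illustration; the
// majors default to GMMA::Major::K.
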
template <
class ElementA,
class ElementB,
class ElementC,
class TileShape_MNK,
GMMA::Major MajorA = GMMA::Major::K,
GMMA::Major MajorB = GMMA::Major::K,
auto... Args // e.g. GMMA::ScaleOut::One, [GMMA::ScaleIn::One, GMMA::ScaleIn::One]
                                        // Most commonly left empty to use the defaults
>
CUTE_HOST_DEVICE constexpr
auto
rs_op_selector()
{
static_assert(is_static<TileShape_MNK>::value, "TileShape_MNK must be static.");
static_assert(rank(TileShape_MNK{}) == 3, "TileShape_MNK must be rank 3.");
static_assert(size<0>(TileShape_MNK{}) % 64 == 0, "Tile_M must be a multiple of 64.");
static_assert(MajorA == GMMA::Major::K, "Register source A operand GMMAs must have K-major A layout.");
auto Tile_N = size<1>(TileShape_MNK{});
// F16 accumulator
if constexpr (is_same_v<ElementC, half_t>) {
// Input A: half_t ; Input B: half_t
if constexpr (is_same_v<ElementA, half_t> && is_same_v<ElementB, half_t>) {
static_assert(size<2>(TileShape_MNK{}) % 16 == 0, "Tile_K must be a multiple of 16.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x16_F16F16F16_RS<MajorA, MajorB, Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e4m3_t ; Input B: float_e4m3_t
else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e4m3_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F16E4M3E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F16E4M3E4M3_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F16E4M3E4M3_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F16E4M3E4M3_RS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e4m3_t ; Input B: float_e5m2_t
else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e5m2_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F16E4M3E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F16E4M3E5M2_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F16E4M3E5M2_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F16E4M3E5M2_RS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e5m2_t ; Input B: float_e4m3_t
else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e4m3_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F16E5M2E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F16E5M2E4M3_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F16E5M2E4M3_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F16E5M2E4M3_RS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e5m2_t ; Input B: float_e5m2_t
else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e5m2_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F16E5M2E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F16E5M2E5M2_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F16E5M2E5M2_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F16E5M2E5M2_RS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
else {
      static_assert(sizeof(ElementA) == 0, "No eligible GMMA operator for the requested configuration.");
}
}
// F32 accumulator
else if constexpr (is_same_v<ElementC, float>) {
// Input A: half_t ; Input B: half_t
if constexpr (is_same_v<ElementA, half_t> && is_same_v<ElementB, half_t>) {
static_assert(size<2>(TileShape_MNK{}) % 16 == 0, "Tile_K must be a multiple of 16.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x16_F32F16F16_RS<MajorA, MajorB, Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: bfloat16_t ; Input B: bfloat16_t
else if constexpr (is_same_v<ElementA, bfloat16_t> && is_same_v<ElementB, bfloat16_t>) {
static_assert(size<2>(TileShape_MNK{}) % 16 == 0, "Tile_K must be a multiple of 16.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x16_F32BF16BF16_RS<MajorA, MajorB, Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: tfloat32_t ; Input B: tfloat32_t
else if constexpr (is_same_v<ElementA, tfloat32_t> && is_same_v<ElementB, tfloat32_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 8 == 0, "Tile_K must be a multiple of 8.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x8_F32TF32TF32_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x8_F32TF32TF32_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x8_F32TF32TF32_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x8_F32TF32TF32_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x8_F32TF32TF32_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x8_F32TF32TF32_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x8_F32TF32TF32_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x8_F32TF32TF32_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x8_F32TF32TF32_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x8_F32TF32TF32_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x8_F32TF32TF32_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x8_F32TF32TF32_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x8_F32TF32TF32_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x8_F32TF32TF32_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x8_F32TF32TF32_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x8_F32TF32TF32_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x8_F32TF32TF32_RS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e4m3_t ; Input B: float_e4m3_t
else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e4m3_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F32E4M3E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F32E4M3E4M3_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F32E4M3E4M3_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F32E4M3E4M3_RS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e4m3_t ; Input B: float_e5m2_t
else if constexpr (is_same_v<ElementA, float_e4m3_t> && is_same_v<ElementB, float_e5m2_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F32E4M3E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F32E4M3E5M2_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F32E4M3E5M2_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F32E4M3E5M2_RS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e5m2_t ; Input B: float_e4m3_t
else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e4m3_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F32E5M2E4M3_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F32E5M2E4M3_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F32E5M2E4M3_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F32E5M2E4M3_RS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: float_e5m2_t ; Input B: float_e5m2_t
else if constexpr (is_same_v<ElementA, float_e5m2_t> && is_same_v<ElementB, float_e5m2_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_F32E5M2E5M2_RS_TN<Args...>{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_F32E5M2E5M2_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_F32E5M2E5M2_RS_TN<Args...>{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_F32E5M2E5M2_RS_TN<Args...>{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
else {
      static_assert(sizeof(ElementA) == 0, "No eligible GMMA operator for the requested configuration.");
}
}
// S32 accumulator
else if constexpr (is_same_v<ElementC, int32_t>) {
// Input A: int8_t ; Input B: int8_t
if constexpr (is_same_v<ElementA, int8_t> && is_same_v<ElementB, int8_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_S32S8S8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_S32S8S8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_S32S8S8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_S32S8S8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_S32S8S8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_S32S8S8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_S32S8S8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_S32S8S8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_S32S8S8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_S32S8S8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_S32S8S8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_S32S8S8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_S32S8S8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_S32S8S8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_S32S8S8_RS_TN{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_S32S8S8_RS_TN{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_S32S8S8_RS_TN{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: int8_t ; Input B: uint8_t
else if constexpr (is_same_v<ElementA, int8_t> && is_same_v<ElementB, uint8_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_S32S8U8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_S32S8U8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_S32S8U8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_S32S8U8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_S32S8U8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_S32S8U8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_S32S8U8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_S32S8U8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_S32S8U8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_S32S8U8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_S32S8U8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_S32S8U8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_S32S8U8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_S32S8U8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_S32S8U8_RS_TN{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_S32S8U8_RS_TN{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_S32S8U8_RS_TN{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: uint8_t ; Input B: int8_t
else if constexpr (is_same_v<ElementA, uint8_t> && is_same_v<ElementB, int8_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_S32U8S8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_S32U8S8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_S32U8S8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_S32U8S8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_S32U8S8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_S32U8S8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_S32U8S8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_S32U8S8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_S32U8S8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_S32U8S8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_S32U8S8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_S32U8S8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_S32U8S8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_S32U8S8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_S32U8S8_RS_TN{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_S32U8S8_RS_TN{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_S32U8S8_RS_TN{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
// Input A: uint8_t ; Input B: uint8_t
else if constexpr (is_same_v<ElementA, uint8_t> && is_same_v<ElementB, uint8_t>) {
static_assert(MajorA == GMMA::Major::K, "MajorA must be GMMA::Major::K for this config.");
static_assert(MajorB == GMMA::Major::K, "MajorB must be GMMA::Major::K for this config.");
static_assert(size<2>(TileShape_MNK{}) % 32 == 0, "Tile_K must be a multiple of 32.");
if constexpr (Tile_N % 256 == 0) {
return SM90_64x256x32_S32U8U8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 240 == 0) {
return SM90_64x240x32_S32U8U8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 224 == 0) {
return SM90_64x224x32_S32U8U8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 208 == 0) {
return SM90_64x208x32_S32U8U8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 192 == 0) {
return SM90_64x192x32_S32U8U8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 176 == 0) {
return SM90_64x176x32_S32U8U8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 160 == 0) {
return SM90_64x160x32_S32U8U8_RS_TN{};
}
#endif
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 144 == 0) {
return SM90_64x144x32_S32U8U8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 128 == 0) {
return SM90_64x128x32_S32U8U8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 112 == 0) {
return SM90_64x112x32_S32U8U8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 96 == 0) {
return SM90_64x96x32_S32U8U8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 80 == 0) {
return SM90_64x80x32_S32U8U8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 64 == 0) {
return SM90_64x64x32_S32U8U8_RS_TN{};
}
#if defined(CUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
else if constexpr (Tile_N % 48 == 0) {
return SM90_64x48x32_S32U8U8_RS_TN{};
}
#endif
else if constexpr (Tile_N % 32 == 0) {
return SM90_64x32x32_S32U8U8_RS_TN{};
}
else if constexpr (Tile_N % 16 == 0) {
return SM90_64x16x32_S32U8U8_RS_TN{};
}
else if constexpr (Tile_N % 8 == 0) {
return SM90_64x8x32_S32U8U8_RS_TN{};
}
else {
static_assert(Tile_N % 8 == 0, "Tile_N must be a multiple of 8.");
}
}
else {
      static_assert(sizeof(ElementA) == 0, "No eligible GMMA operator for the requested configuration.");
}
}
// Unknown accumulator type
else {
static_assert(sizeof(ElementC) == 0, "Unknown ElementC accumulator type.");
}
}
} // end namespace GMMA
} // end namespace cute
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cute/arch/mma_sm90.hpp/0 | {
"file_path": "include/cute/arch/mma_sm90.hpp",
"repo_id": "include",
"token_count": 56104
} | 13 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/arch/mma_sm70.hpp>
#include <cute/atom/mma_traits.hpp>
#include <cute/layout.hpp>
namespace cute
{
namespace {
// Logical thread id to thread idx (quadpair)
using SM70_QuadPair = Layout<Shape <_4, _2>,
Stride<_1,_16>>;
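// Example (illustrative): the quadpair's 8 logical threads map to thread indices
//   0,1,2,3 -> 0,1,2,3   and   4,5,6,7 -> 16,17,18,19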
// (T8,V4) -> (M8,K4)
using SM70_8x4_Row = Layout<Shape <_8,_4>,
Stride<_1,_8>>;
// (T8,V4) -> (M8,K4)
using SM70_8x4_Col = Layout<Shape <Shape <_4,_2>,_4>,
Stride<Stride<_8,_4>,_1>>;
// (T8,V8) -> (M8,N8)
using SM70_8x8_16b = Layout<Shape <_8,_8>,
Stride<_1,_8>>;
// (T8,V8) -> (M8,N8)
using SM70_8x8_32b = Layout<Shape <Shape <_2, _2,_2>,Shape <_2,_2, _2>>,
Stride<Stride<_1,_16,_4>,Stride<_8,_2,_32>>>;
}
///////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM70_8x8x4_F16F16F16F16_TN>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using Shape_MNK = Shape<_8,_8,_4>;
using ThrID = SM70_QuadPair;
using ALayout = SM70_8x4_Row;
using BLayout = SM70_8x4_Row;
using CLayout = SM70_8x8_16b;
};
///////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM70_8x8x4_F16F16F16F16_NT>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using Shape_MNK = Shape<_8,_8,_4>;
using ThrID = SM70_QuadPair;
using ALayout = SM70_8x4_Col;
using BLayout = SM70_8x4_Col;
using CLayout = SM70_8x8_16b;
};
///////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM70_8x8x4_F16F16F16F16_NN>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using Shape_MNK = Shape<_8,_8,_4>;
using ThrID = SM70_QuadPair;
using ALayout = SM70_8x4_Col;
using BLayout = SM70_8x4_Row;
using CLayout = SM70_8x8_16b;
};
///////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM70_8x8x4_F16F16F16F16_TT>
{
using ValTypeD = half_t;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = half_t;
using Shape_MNK = Shape<_8,_8,_4>;
using ThrID = SM70_QuadPair;
using ALayout = SM70_8x4_Row;
using BLayout = SM70_8x4_Col;
using CLayout = SM70_8x8_16b;
};
///////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM70_8x8x4_F32F16F16F32_TN>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using Shape_MNK = Shape<_8,_8,_4>;
using ThrID = SM70_QuadPair;
using ALayout = SM70_8x4_Row;
using BLayout = SM70_8x4_Row;
using CLayout = SM70_8x8_32b;
};
///////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM70_8x8x4_F32F16F16F32_NT>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using Shape_MNK = Shape<_8,_8,_4>;
using ThrID = SM70_QuadPair;
using ALayout = SM70_8x4_Col;
using BLayout = SM70_8x4_Col;
using CLayout = SM70_8x8_32b;
};
///////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM70_8x8x4_F32F16F16F32_NN>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using Shape_MNK = Shape<_8,_8,_4>;
using ThrID = SM70_QuadPair;
using ALayout = SM70_8x4_Col;
using BLayout = SM70_8x4_Row;
using CLayout = SM70_8x8_32b;
};
///////////////////////////////////////////////////////////////////////////////
template <>
struct MMA_Traits<SM70_8x8x4_F32F16F16F32_TT>
{
using ValTypeD = float;
using ValTypeA = half_t;
using ValTypeB = half_t;
using ValTypeC = float;
using Shape_MNK = Shape<_8,_8,_4>;
using ThrID = SM70_QuadPair;
using ALayout = SM70_8x4_Row;
using BLayout = SM70_8x4_Col;
using CLayout = SM70_8x8_32b;
};
///////////////////////////////////////////////////////////////////////////////
} // namespace cute
| include/cute/atom/mma_traits_sm70.hpp/0 | {
"file_path": "include/cute/atom/mma_traits_sm70.hpp",
"repo_id": "include",
"token_count": 2332
} | 14 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/underscore.hpp>
#include <cute/int_tuple.hpp>
#include <cute/stride.hpp>
#include <cute/numeric/arithmetic_tuple.hpp>
#include <cute/numeric/integral_ratio.hpp>
#include <cute/numeric/integral_constant.hpp>
namespace cute
{
// Aliases
template <class... Shapes>
using Shape = cute::tuple<Shapes...>;
template <class... Strides>
using Stride = cute::tuple<Strides...>;
template <class... Strides>
using Step = cute::tuple<Strides...>;
template <class... Coords>
using Coord = cute::tuple<Coords...>;
template <class... Layouts>
using Tile = cute::tuple<Layouts...>;
template <class... Ts>
CUTE_HOST_DEVICE constexpr
Shape<Ts...>
make_shape(Ts const&... t) {
return {t...};
}
template <class... Ts>
CUTE_HOST_DEVICE constexpr
Stride<Ts...>
make_stride(Ts const&... t) {
return {t...};
}
template <class... Ts>
CUTE_HOST_DEVICE constexpr
Step<Ts...>
make_step(Ts const&... t) {
return {t...};
}
template <class... Ts>
CUTE_HOST_DEVICE constexpr
Coord<Ts...>
make_coord(Ts const&... t) {
return {t...};
}
template <class... Ts>
CUTE_HOST_DEVICE constexpr
Tile<Ts...>
make_tile(Ts const&... t)
{
return {t...};
}
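// Example (illustrative): shapes, strides, and coordinates may mix compile-time and
// run-time integers; static values display with a leading underscore (e.g. _4),
// dynamic values as plain integers.
//   auto shape  = make_shape (Int<4>{}, 8);           // (_4,8)
//   auto stride = make_stride(Int<1>{}, Int<4>{});    // (_1,_4)
//   auto coord  = make_coord (3, 5);                  // (3,5)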
//
// Layout
//
template <class Shape, class Stride = LayoutLeft::Apply<Shape> >
struct Layout
: private cute::tuple<Shape, Stride> // EBO for static layouts
{
// Expensive in compilation time...
//static_assert(is_congruent<Shape, Stride>::value, "Shape and Stride must be congruent");
// NOTE: This defaults static Shapes/Strides correctly, but not dynamic
CUTE_HOST_DEVICE constexpr
Layout(Shape const& shape = {}, Stride const& stride = {})
: cute::tuple<Shape, Stride>(shape, stride)
{}
//
// Accessors
//
static constexpr int rank = rank_v<Shape>;
CUTE_HOST_DEVICE constexpr
decltype(auto)
layout() {
return *this;
}
CUTE_HOST_DEVICE constexpr
decltype(auto)
layout() const {
return *this;
}
template <int... I>
CUTE_HOST_DEVICE constexpr
decltype(auto)
shape() {
return get<0,I...>(static_cast<cute::tuple<Shape, Stride>&>(*this));
}
template <int... I>
CUTE_HOST_DEVICE constexpr
decltype(auto)
shape() const {
return get<0,I...>(static_cast<cute::tuple<Shape, Stride> const&>(*this));
}
template <int... I>
CUTE_HOST_DEVICE constexpr
decltype(auto)
stride() {
return get<1,I...>(static_cast<cute::tuple<Shape, Stride>&>(*this));
}
template <int... I>
CUTE_HOST_DEVICE constexpr
decltype(auto)
stride() const {
return get<1,I...>(static_cast<cute::tuple<Shape, Stride> const&>(*this));
}
//
// Mappings
//
// Map a logical coordinate to a linear index (Coord has no Underscore slice operators)
// OR
// Slice the layout and return the sublayout (Coord has an Underscore slice op)
template <class Coord>
CUTE_HOST_DEVICE constexpr
auto
operator()(Coord const& coord) const {
if constexpr (has_underscore<Coord>::value) {
return slice(coord, *this);
} else {
return crd2idx(coord, shape(), stride());
}
CUTE_GCC_UNREACHABLE;
}
// Convenience function for multi-dimensional coordinates
template <class Coord0, class Coord1, class... Coords>
CUTE_HOST_DEVICE constexpr
auto
operator()(Coord0 const& c0, Coord1 const& c1, Coords const&... cs) const {
return operator()(make_coord(c0,c1,cs...));
}
//
// Compose
//
template <class OtherLayout>
CUTE_HOST_DEVICE constexpr
auto
compose(OtherLayout const& other) const {
return composition(*this, other);
}
template <class... Layouts>
CUTE_HOST_DEVICE constexpr
auto
compose(Layouts const&... layouts) const {
return composition(*this, make_tile(layouts...));
}
template <class OtherShape>
CUTE_HOST_DEVICE constexpr
auto
with_shape(OtherShape const& shape) const {
return composition(*this, make_layout(shape));
}
template <class... Shapes>
CUTE_HOST_DEVICE constexpr
auto
with_shape(Shapes const&... shapes) const {
return composition(*this, make_layout(make_shape(shapes...)));
}
//
// Tile
//
template <class OtherLayout>
CUTE_HOST_DEVICE constexpr
auto
tile(OtherLayout const& other) const {
return tiled_divide(*this, other);
}
template <class... Layouts>
CUTE_HOST_DEVICE constexpr
auto
tile(Layouts const&... layouts) const {
return tiled_divide(*this, make_tile(layouts...));
}
//
// Utility
//
//
// Index to Coordinate
//
// NOTE: Only valid for compact layouts
// Return the (hierarchical) ND logical coordinate corresponding to the linear index
// @post crd2idx(@a result, shape(), stride()) == idx
// @post congruent(@a result, shape())
template <class IInt,
__CUTE_REQUIRES(is_integral<IInt>::value)>
CUTE_HOST_DEVICE constexpr
auto
get_hier_coord(IInt const& idx) const {
return cute::idx2crd(idx, shape(), stride());
}
// Return the (flat) ND logical coordinate corresponding to the linear index
// @post crd2idx(@a result, shape(), stride()) == idx
// @post rank(@a result) == rank(shape()) && depth(@a result) == 1
template <class IInt,
__CUTE_REQUIRES(is_integral<IInt>::value)>
CUTE_HOST_DEVICE constexpr
auto
get_flat_coord(IInt const& idx) const {
return cute::crd2crd(this->get_hier_coord(idx), shape(), repeat<rank>(Int<1>{}));
}
// Return the generalized column-major 1D logical coordinate corresponding to the linear index
// @post crd2idx(@a result, shape(), stride()) == idx
// @post is_integral<decltype(@a result)>::value
template <class IInt,
__CUTE_REQUIRES(is_integral<IInt>::value)>
CUTE_HOST_DEVICE constexpr
auto
get_1d_coord(IInt const& idx) const {
return cute::crd2idx(this->get_hier_coord(idx), shape());
}
//
// Coordinate to Coordinate
//
#if 0
// Return the (hierarchical) ND logical coordinate corresponding to the linear index
// @post congruent(@a result, shape())
template <class Coord>
CUTE_HOST_DEVICE constexpr
auto
crd_2_hier_coord(Coord const& crd) const {
return cute::crd2crd(crd, shape(), shape());
}
// Return the (flat) ND logical coordinate corresponding to the linear index
// @post rank(@a result) == rank(shape()) && depth(@a result) == 1
template <class Coord>
CUTE_HOST_DEVICE constexpr
auto
crd_2_flat_coord(Coord const& crd) const {
return cute::crd2crd(crd, shape(), product_each(shape()));
}
// Return the generalized column-major 1D logical coordinate corresponding to the linear index
// @post is_integral<decltype(@a result)>::value
template <class Coord>
CUTE_HOST_DEVICE constexpr
auto
crd_2_1d_coord(Coord const& crd) const {
//return cute::crd2crd(crd, shape(), product(shape()));
return cute::crd2idx(crd, shape());
}
#endif
};
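// Example (illustrative): a Layout maps logical coordinates to linear indices.
//   Layout<Shape<_4,_8>> layout;            // (_4,_8):(_1,_4), column-major by default
//   auto idx = layout(make_coord(1,2));     // 1*_1 + 2*_4 = 9
//   auto col = layout(make_coord(_,2));     // Underscore slices: the sublayout _4:_1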
// Equality, return a static or dynamic boolean
template <class ShapeA, class StrideA,
class ShapeB, class StrideB>
CUTE_HOST_DEVICE constexpr
auto
operator==(Layout<ShapeA,StrideA> const& layoutA, Layout<ShapeB,StrideB> const& layoutB)
{
return layoutA.shape() == layoutB.shape() && layoutA.stride() == layoutB.stride();
}
template <class Layout>
struct is_layout : false_type {};
template <class Shape, class Stride>
struct is_layout<Layout<Shape,Stride>> : true_type {};
//
// Layout construction
//
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
make_layout(Shape const& shape, Stride const& stride)
{
static_assert(is_tuple<Shape >::value || is_integral<Shape >::value);
static_assert(is_tuple<Stride>::value || is_integral<Stride>::value);
return Layout<Shape,Stride>(shape, stride);
}
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
make_layout(Shape const& shape)
{
static_assert(is_tuple<Shape >::value || is_integral<Shape >::value);
return make_layout(shape, compact_major<LayoutLeft>(shape));
}
//
// Convenience tags for common layouts
//
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
make_layout(Shape const& shape, LayoutLeft)
{
return make_layout(shape, compact_major<LayoutLeft>(shape));
}
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
make_layout(Shape const& shape, LayoutRight)
{
return make_layout(shape, compact_major<LayoutRight>(shape));
}
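// Example (illustrative):
//   make_layout(make_shape(Int<4>{}, 6))                   // (_4,6):(_1,_4)   LayoutLeft default
//   make_layout(make_shape(Int<4>{}, 6), LayoutRight{})    // (_4,6):(6,_1)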
//
// Construct a layout from multiple layouts by concatenation
//
// One argument overload
template <class Shape0, class Stride0>
CUTE_HOST_DEVICE constexpr
auto
make_layout(Layout<Shape0,Stride0> const& layout0)
{
return make_layout(make_shape (layout0.shape() ),
make_stride(layout0.stride()));
}
// Two argument overload
template <class Shape0, class Stride0,
class Shape1, class Stride1>
CUTE_HOST_DEVICE constexpr
auto
make_layout(Layout<Shape0,Stride0> const& layout0,
Layout<Shape1,Stride1> const& layout1)
{
return make_layout(make_shape (layout0.shape() , layout1.shape() ),
make_stride(layout0.stride(), layout1.stride()));
}
// Var argument overload
template <class Shape0, class Stride0,
class Shape1, class Stride1,
class... Shapes, class... Strides>
CUTE_HOST_DEVICE constexpr
auto
make_layout(Layout<Shape0,Stride0> const& layout0,
Layout<Shape1,Stride1> const& layout1,
Layout<Shapes,Strides> const&... layouts)
{
return make_layout(make_shape (layout0.shape() , layout1.shape() , layouts.shape()... ),
make_stride(layout0.stride(), layout1.stride(), layouts.stride()...));
}
//
// Advanced Layout constructions
//
// Make a compact layout with shape @a shape and strides following the order induced by @a order.
// The values of dynamic entries in @a order are ignored; they are treated as larger than all static values
// and ordered from left to right among themselves.
// Example:
// make_ordered_layout(Shape<_2,_2,_2,_2>{}, Step<_0,_2,_3,_1>{})
// -> (_2,_2,_2,_2):(_1,_4,_8,_2)
// make_ordered_layout(make_shape(2,3,4,5), make_step(Int<2>{}, 67, 42, Int<50>{}))
// -> (2,3,4,5):(_1,10,30,2)
template <class Shape, class Order>
CUTE_HOST_DEVICE constexpr
auto
make_ordered_layout(Shape const& shape, Order const& order)
{
return make_layout(shape, compact_order(shape, order));
}
// Make a compact layout with the same shape as @a layout
// and strides following the order induced by @a layout.stride().
// Static-0 strides in the input @a layout are preserved in the output.
// Example:
// make_layout_like(Layout<Shape<_2,_2,_2,_2>, Stride<_0,_2,_4,_1>>{})
// -> (_2,_2,_2,_2):(_0,_2,_4,_1)
// make_layout_like(make_layout(make_shape(2,3,4,5), make_stride(Int<0>{},42,Int<1>{},Int<0>{})))
// -> (2,3,4,5):(_0,4,_1,_0)
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
make_layout_like(Layout<Shape,Stride> const& layout)
{
return make_layout(layout.shape(),
compact_order(filter_zeros(layout.stride(), layout.shape()), layout.stride()));
}
// Make a compact layout with the same shape as @a layout
// and strides following the order induced by @a layout.stride(),
// except mode-0 is always stride-1 and generated column-major.
// The 0th mode is commonly used for MMA_Atoms or Copy_Atoms so this
// generates the 0th mode with LayoutLeft (preserving stride-0s) regardless of the reference layout
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
make_fragment_like(Layout<Shape,Stride> const& layout)
{
constexpr int R = Layout<Shape,Stride>::rank;
if constexpr (R > 1 && is_static<Shape>::value) {
return tiled_product(make_layout(get<0>(layout.shape()),
compact_major<LayoutLeft>(filter_zeros(get<0>(layout.stride()), get<0>(layout.shape())))),
make_ordered_layout(take<1,R>(layout.shape()), take<1,R>(layout.stride())));
} else {
return make_layout(layout.shape());
}
CUTE_GCC_UNREACHABLE;
}
template <class Shape,
__CUTE_REQUIRES(is_tuple<Shape>::value || is_integral<Shape>::value)>
CUTE_HOST_DEVICE constexpr
auto
make_fragment_like(Shape const& shape)
{
return make_layout(shape);
}
//
// Make an identity layout that maps a coordinate to itself
//
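// Example (illustrative): the strides are the canonical basis elements, so applying the
// layout yields the coordinate itself (carried as an arithmetic tuple) rather than a folded integer.
//   auto id = make_identity_layout(Shape<_4,_8>{});
//   id(make_coord(1,2));    // the coordinate (1,2)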
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
make_identity_layout(Shape const& shape)
{
return make_layout(shape, make_basis_like(shape));
}
//
// Operations to manipulate Layouts like a tuple of pairs
//
// Return the Is...th sublayout.
// For Is... = <I0,I1,...,IN>, equivalent to get<IN>(...get<I1>(get<I0>(layout)))
template <size_t... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
get(Layout<Shape,Stride> const& layout)
{
return make_layout(get<Is...>(layout.shape()),
get<Is...>(layout.stride()));
}
// Return a new layout with only the modes in the range [B,E)
template <int B, int E, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
take(Layout<Shape,Stride> const& layout)
{
static_assert(B < E, "take: empty range error");
static_assert(0 <= B && E <= Layout<Shape,Stride>::rank, "take: range out of bounds");
return make_layout(take<B,E>(layout.shape()),
take<B,E>(layout.stride()));
}
// Return a new layout with only the modes Is... = <I0,I1,...,IN>
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
select(Layout<Shape,Stride> const& layout)
{
return make_layout(select<Is...>(layout.shape()),
select<Is...>(layout.stride()));
}
// Return a layout with depth at most 1
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
flatten(Layout<Shape,Stride> const& layout)
{
return make_layout(flatten(layout.shape()),
flatten(layout.stride()));
}
// Return a layout whose profile is congruent to TargetProfile
// @pre Input layout is flat, flatten(@a layout) == @a layout
// @pre Input layout can be folded to profile, rank(@a layout) == rank(flatten(@a target_profile))
// @post congruent(@a result, @a target_profile)
template <class Shape, class Stride, class TargetProfile>
CUTE_HOST_DEVICE constexpr
auto
unflatten(Layout<Shape,Stride> const& layout, TargetProfile const& target_profile)
{
return make_layout(unflatten(layout.shape(), target_profile),
unflatten(layout.stride(), target_profile));
}
//
// Utilities
//
// Return the sublayout of mode I...
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
decltype(auto)
layout(Layout<Shape,Stride> const& layout)
{
if constexpr (sizeof...(Is) == 0) {
return layout;
} else {
return get<Is...>(layout);
}
CUTE_GCC_UNREACHABLE;
}
// Return the shape of a mode
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
decltype(auto)
shape(Layout<Shape,Stride>& layout)
{
return layout.template shape<Is...>();
}
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
decltype(auto)
shape(Layout<Shape,Stride> const& layout)
{
return layout.template shape<Is...>();
}
// Return the stride of a mode
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
decltype(auto)
stride(Layout<Shape,Stride>& layout)
{
return layout.template stride<Is...>();
}
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
decltype(auto)
stride(Layout<Shape,Stride> const& layout)
{
return layout.template stride<Is...>();
}
// Return the number of elements in a mode
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
size(Layout<Shape,Stride> const& layout)
{
return size(shape<Is...>(layout));
}
// Return the number of modes
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
rank(Layout<Shape,Stride> const& layout)
{
return rank(shape<Is...>(layout));
}
// Return the depth of the layout
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
depth(Layout<Shape,Stride> const& layout)
{
return depth(shape<Is...>(layout));
}
// Return the codomain shape of a mode
// @post size(coshape(@a a)) == cosize(@a a)
// @return C Coordinate with smallest elements such that
// @a elem_less(sub_layout(c), C) for all c < size(@a sub_layout)
// where sub_layout = get<Is...>(layout).
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
coshape(Layout<Shape,Stride> const& layout)
{
// Protect against negative strides
auto abs_sub_layout = make_layout(shape<Is...>(layout),
transform_leaf(stride<Is...>(layout), abs_fn{}));
auto co_coord = as_arithmetic_tuple(abs_sub_layout(size(abs_sub_layout) - Int<1>{}));
return co_coord + repeat_like(co_coord, Int<1>{});
}
// Return the codomain size of a mode
// @return M smallest integer such that
// @a sub_layout(c) < M for all c < size(@a sub_layout)
// where sub_layout = get<Is...>(layout).
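// Example (illustrative): size counts the domain, cosize bounds the codomain.
//   size  (Layout<Shape<_4,_8>, Stride<_1,_16>>{})   // _32
//   cosize(Layout<Shape<_4,_8>, Stride<_1,_16>>{})   // _116 == 3*_1 + 7*_16 + 1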
template <int... Is, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
cosize(Layout<Shape,Stride> const& layout)
{
return size(coshape<Is...>(layout));
}
template <class Layout>
using cosize_t = decltype(cosize(declval<Layout>()));
template <class Layout>
static constexpr int cosize_v = cosize_t<Layout>::value;
// Since crd2idx(coord, shape) exists, it makes sense to have crd2idx(coord, Layout) as well
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
crd2idx(Coord const& c, Layout<Shape,Stride> const& layout)
{
return crd2idx(c, layout.shape(), layout.stride());
}
//
// Slice and Dice a layout
//
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
slice(Coord const& c, Layout<Shape,Stride> const& layout)
{
return make_layout(slice(c, layout.shape()),
slice(c, layout.stride()));
}
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
slice_and_offset(Coord const& c, Layout<Shape,Stride> const& layout)
{
return cute::make_tuple(slice(c, layout), crd2idx(c, layout));
}
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
dice(Coord const& c, Layout<Shape,Stride> const& layout)
{
return make_layout(dice(c, layout.shape()),
dice(c, layout.stride()));
}
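// Example (illustrative): slicing keeps the Underscore modes, dicing keeps the others.
//   auto layout = make_layout(Shape<_4,_8>{});    // (_4,_8):(_1,_4)
//   slice(make_coord(_,2), layout);               // _4:_1
//   dice (make_coord(_,2), layout);               // _8:_4
//   slice_and_offset(make_coord(_,2), layout);    // (_4:_1, 8)   offset of the dropped coordinate: 2*_4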
// Compute a pointer offset and (potentially modified) layout from a coordinate
// This exists so it can be overloaded for ComposedLayout
template <class Coord, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
domain_offset(Coord const& coord, Layout<Shape,Stride> const& layout)
{
return cute::make_tuple(layout, layout(coord));
}
//
// Transform the modes of a layout
//
namespace detail {
template <class Tuple, class F, int... I>
CUTE_HOST_DEVICE constexpr
auto
transform_layout(Tuple const& t, F&& f, seq<I...>)
{
return make_layout(f(get<I>(t))...);
}
template <class Tuple0, class Tuple1, class F, int... I, int... I0, int... I1>
CUTE_HOST_DEVICE constexpr
auto
transform_layout(Tuple0 const& t0, Tuple1 const& t1, F&& f, seq<I...>, seq<I0...>, seq<I1...>)
{
return make_layout(f(get<I>(t0),get<I>(t1))..., get<I0>(t0)..., get<I1>(t1)...);
}
} // end namespace detail
template <class Tuple, class F>
CUTE_HOST_DEVICE constexpr
auto
transform_layout(Tuple const& t, F&& f)
{
return detail::transform_layout(t, f, make_seq<decltype(rank(t))::value>{});
}
template <class Tuple0, class Tuple1, class F>
CUTE_HOST_DEVICE constexpr
auto
transform_layout(Tuple0 const& t0, Tuple1 const& t1, F&& f)
{
constexpr int R0 = decltype(rank(t0))::value;
constexpr int R1 = decltype(rank(t1))::value;
constexpr int R = (R0 < R1) ? R0 : R1;
return detail::transform_layout(t0, t1, f, make_seq<R>{}, make_range<R,R0>{}, make_range<R,R1>{});
}
//
// Coalesce and Filter
//
namespace detail {
// Look at each element and the front of the stack (in order of priority)
// front(NewLayout) get<I>(Layout)
// s0:d0 _1:d1 => continue
// _1:d0 s1:d1 => replace_front s1:d1
// s0:s1*d1 s1:d1 => replace_front s0*s1:d1
// s0:d0 s1:d1 => prepend s1:d1
//
// @pre OldShape and OldStride are flat
template <int I, class OldShape, class OldStride, class NewShape, class NewStride>
CUTE_HOST_DEVICE constexpr
auto
bw_coalesce(OldShape const& old_shape, OldStride const& old_stride,
NewShape const& new_shape, NewStride const& new_stride)
{
if constexpr (I == -1) {
// Base case, we're done
if constexpr (is_constant<1, NewShape>::value) {
return Layout<_1,_0>{};
} else {
return Layout<NewShape,NewStride>{new_shape,new_stride};
}
} else if constexpr (is_constant<1, decltype(get<I>(old_shape))>::value) {
// shape<I>(layout) == _1, skip it and continue
return bw_coalesce<I-1>(old_shape, old_stride, new_shape, new_stride);
} else if constexpr (is_constant<1, NewShape>::value) {
// Replace our shape-1 with anything (Can only happen on input new_shape/new_stride)
return bw_coalesce<I-1>(old_shape, old_stride, get<I>(old_shape), get<I>(old_stride));
} else if constexpr (is_static<decltype(get<0>(new_shape))>::value &&
is_constant<true, decltype(get<I>(old_shape) * get<I>(old_stride) == get<0>(new_stride))>::value) {
// Merge modes because the shapes and strides match
return bw_coalesce<I-1>(old_shape, old_stride,
replace_front(new_shape, get<I>(old_shape) * get<0>(new_shape)),
replace_front(new_stride, get<I>(old_stride)));
} else {
// Can't replace or merge, so prepend a new mode
return bw_coalesce<I-1>(old_shape, old_stride,
prepend(new_shape, get<I>(old_shape)),
prepend(new_stride, get<I>(old_stride)));
}
CUTE_GCC_UNREACHABLE;
}
// cute::coalesce promises to not change the Layout as a function from integers to codomain.
// It accomplishes this inside of the Layout's domain, but not always outside of the domain.
// Example: (_4,_1):(_1,_0) coalesces to _4:_1.
// detail::coalesce_x preserves the Layout function inside its domain and outside.
//
// @post depth(@a result) <= 1
// @post for all i, 0 <= i, @a layout(i) == @a result(i)
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
coalesce_x(Layout<Shape,Stride> const& layout)
{
auto flat_shape = flatten(layout.shape());
auto flat_stride = flatten(layout.stride());
constexpr int R = decltype(rank(flat_shape))::value;
if constexpr (is_constant<1, decltype(get<R-1>(flat_shape))>::value) {
return detail::bw_coalesce<R-2>(flat_shape, flat_stride, Int<2>{}, get<R-1>(flat_stride));
} else {
return detail::bw_coalesce<R-2>(flat_shape, flat_stride, get<R-1>(flat_shape), get<R-1>(flat_stride));
}
}
// Apply coalesce_x at the terminals of trg_profile
template <class Shape, class Stride, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
coalesce_x(Layout<Shape,Stride> const& layout, IntTuple const& trg_profile)
{
if constexpr (is_tuple<IntTuple>::value) {
static_assert(tuple_size<IntTuple>::value <= Layout<Shape,Stride>::rank);
return cute::transform_layout(layout, trg_profile, [](auto const& l, auto const& t) { return coalesce_x(l,t); });
} else {
return coalesce_x(layout);
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
// "Simplify" the layout by combining modes that are possible to combine
// Does not respect the shape of the layout, but does preserve total size
// @post size(@a result) == size(@a layout)
// @post depth(@a result) <= 1
// @post for all i, 0 <= i < size(@a layout), @a layout(i) == @a result(i)
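// Example (illustrative):
//   coalesce(Layout<Shape<_2,Shape<_1,_6>>, Stride<_1,Stride<_6,_2>>>{})   // (_2,(_1,_6)):(_1,(_6,_2))
//     -> _12:_1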
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
coalesce(Layout<Shape,Stride> const& layout)
{
auto flat_shape = flatten(layout.shape());
auto flat_stride = flatten(layout.stride());
constexpr int R = decltype(rank(flat_shape))::value;
return detail::bw_coalesce<R-2>(flat_shape, flat_stride, get<R-1>(flat_shape), get<R-1>(flat_stride));
}
// Apply coalesce at the terminals of trg_profile
template <class Shape, class Stride, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
coalesce(Layout<Shape,Stride> const& layout, IntTuple const& trg_profile)
{
if constexpr (is_tuple<IntTuple>::value) {
static_assert(tuple_size<IntTuple>::value <= Layout<Shape,Stride>::rank);
return transform_layout(layout, trg_profile, [](auto const& l, auto const& t) { return coalesce(l,t); });
} else {
return coalesce(layout);
}
CUTE_GCC_UNREACHABLE;
}
// Combine static and dynamic modes of a shape.
// @post size(@a result) == size(@a shape)
// @post depth(@a result) <= 1
template <class Shape>
CUTE_HOST_DEVICE constexpr
auto
coalesce(Shape const& shape)
{
static_assert(is_integral<Shape>::value || is_tuple<Shape>::value);
return cute::fold_first(flatten(shape), [](auto const& init, auto const& a) {
if constexpr (is_static<decltype(back(init))>::value == is_static<decltype(a)>::value) {
return replace_back(init, back(init) * a); // Both static or both dynamic, coalesce and replace
} else {
return append(init, a); // Can't coalesce, so append
}
});
}
// Replace the modes in layout that have a 0-stride with a 1-size
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
filter_zeros(Layout<Shape,Stride> const& layout)
{
return make_layout(filter_zeros(layout.stride(), layout.shape()), layout.stride());
}
// Remove all of the 0-strides and 1-sizes
// Return 1-shape if empty
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
filter(Layout<Shape,Stride> const& layout)
{
return coalesce(filter_zeros(layout));
}
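// Example (illustrative): filter_zeros neutralizes stride-0 modes, filter then coalesces them away.
//   filter_zeros(Layout<Shape<_4,_6>, Stride<_0,_1>>{})   // (_4,_6):(_0,_1)
//     -> (_1,_6):(_0,_1)
//   filter(Layout<Shape<_4,_6>, Stride<_0,_1>>{})
//     -> _6:_1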
// Apply filter at the terminals of trg_profile
template <class Shape, class Stride, class IntTuple>
CUTE_HOST_DEVICE constexpr
auto
filter(Layout<Shape,Stride> const& layout, IntTuple const& trg_profile)
{
if constexpr (is_tuple<IntTuple>::value) {
static_assert(tuple_size<IntTuple>::value <= Layout<Shape,Stride>::rank);
return transform_layout(layout, trg_profile, [](auto const& l, auto const& t) { return filter(l,t); });
} else {
return filter(layout);
}
CUTE_GCC_UNREACHABLE;
}
//
// Append, Prepend, Replace
//
template <int N, class ShapeA, class StrideA, class ShapeX = _1, class StrideX = _0>
CUTE_HOST_DEVICE constexpr
auto
append(Layout<ShapeA,StrideA> const& layout,
Layout<ShapeX,StrideX> const& x = {})
{
return make_layout(append<N>(layout.shape(), x.shape()),
append<N>(layout.stride(), x.stride()));
}
template <class ShapeA, class StrideA, class ShapeX = _1, class StrideX = _0>
CUTE_HOST_DEVICE constexpr
auto
append(Layout<ShapeA,StrideA> const& layout,
Layout<ShapeX,StrideX> const& x = {})
{
return make_layout(append(layout.shape(), x.shape()),
append(layout.stride(), x.stride()));
}
template <int N, class ShapeA, class StrideA, class ShapeX = _1, class StrideX = _0>
CUTE_HOST_DEVICE constexpr
auto
prepend(Layout<ShapeA,StrideA> const& layout,
Layout<ShapeX,StrideX> const& x = {})
{
return make_layout(prepend<N>(layout.shape(), x.shape()),
prepend<N>(layout.stride(), x.stride()));
}
template <class ShapeA, class StrideA, class ShapeX = _1, class StrideX = _0>
CUTE_HOST_DEVICE constexpr
auto
prepend(Layout<ShapeA,StrideA> const& layout,
Layout<ShapeX,StrideX> const& x = {})
{
return make_layout(prepend(layout.shape(), x.shape()),
prepend(layout.stride(), x.stride()));
}
template <int N, class ShapeA, class StrideA, class ShapeX, class StrideX>
CUTE_HOST_DEVICE constexpr
auto
replace(Layout<ShapeA,StrideA> const& layout,
Layout<ShapeX,StrideX> const& x)
{
return make_layout(replace<N>(layout.shape(), x.shape()),
replace<N>(layout.stride(), x.stride()));
}
template <int B, int E, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
group(Layout<Shape,Stride> const& layout)
{
return make_layout(group<B,E>(layout.shape()),
group<B,E>(layout.stride()));
}
//
// Composition of two layouts: lhs o rhs
// @post compatible(rhs, result)
// @post result(c) = lhs(rhs(c))
// for all c in the domain of rhs
//
namespace detail {
template <class LShape, class LStride,
class RShape, class RStride>
CUTE_HOST_DEVICE constexpr
auto
composition_impl(LShape const& lhs_shape, LStride const& lhs_stride,
RShape const& rhs_shape, RStride const& rhs_stride)
{
if constexpr (is_tuple<RShape>::value) {
// Apply the right-distributivity of Layout composition
return transform_layout(rhs_shape, rhs_stride, [&](auto const& s, auto const& d) {
return composition_impl(lhs_shape, lhs_stride, s, d);
});
} else
if constexpr (is_scaled_basis<RStride>::value) {
// Special case for a ScaledBasis stride
return composition_impl(basis_get(rhs_stride, lhs_shape), basis_get(rhs_stride, lhs_stride),
rhs_shape, basis_value(rhs_stride));
} else
if constexpr (is_constant<0, RStride>::value) {
// Special case shortcut for any static stride-0
return Layout<RShape, RStride>{rhs_shape, rhs_stride};
} else
if constexpr (is_integral<decltype(lhs_shape)>::value) {
// Special case shortcut for any integral LShape
return Layout{rhs_shape, rhs_stride * lhs_stride};
} else
if constexpr (is_constant<1, RStride>::value) {
// Special case shortcut for any static stride-1
constexpr int R = rank_v<LShape>;
auto result_shape_0 = take<0,R-1>(lhs_shape);
// Mod out the rhs_shape from the lhs_shape
auto const [result_shape_1, rest_shape] = fold(result_shape_0, cute::make_tuple(cute::make_tuple(), rhs_shape),
[] (auto const& init, auto const& si) {
return cute::make_tuple(append(get<0>(init), shape_min(abs(si), get<1>(init))), shape_div(get<1>(init), abs(si)));
});
// Jump into coalesce and append (rest_shape, get<R-1>(lhs_stride))
return detail::bw_coalesce<R-2>(result_shape_1, lhs_stride, rest_shape, get<R-1>(lhs_stride));
} else {
// General case: integral RShape and RStride, tuple LShape and LStride
constexpr int R = rank_v<LShape>;
auto result_shape_0 = take<0,R-1>(lhs_shape);
auto result_stride_0 = take<0,R-1>(lhs_stride);
// Divide out the rhs_stride from the lhs_shape
auto const [result_shape_1, rest_stride] = fold(result_shape_0, cute::make_tuple(cute::make_tuple(), rhs_stride),
[] (auto const& init, auto const& di) {
return cute::make_tuple(append(get<0>(init), shape_div(di, get<1>(init))), shape_div(get<1>(init), di));
});
// Apply any lhs_shape changes to the stride
auto result_stride_1 = elem_scale(result_stride_0, shape_div(result_shape_0, result_shape_1));
// Mod out the rhs_shape from the lhs_shape
auto const [result_shape_2, rest_shape] = fold(result_shape_1, cute::make_tuple(cute::make_tuple(), rhs_shape),
[] (auto const& init, auto const& si) {
return cute::make_tuple(append(get<0>(init), shape_min(abs(si), get<1>(init))), shape_div(get<1>(init), abs(si)));
});
// Jump into coalesce and append (rest_shape, rest_stride * get<R-1>(lhs_stride))
return detail::bw_coalesce<R-2>(result_shape_2, result_stride_1, rest_shape, rest_stride * get<R-1>(lhs_stride));
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
template <class LShape, class LStride,
class RShape, class RStride>
CUTE_HOST_DEVICE constexpr
auto
composition(Layout<LShape,LStride> const& lhs,
Layout<RShape,RStride> const& rhs)
{
auto coprofile = repeat_like(decltype(coshape(rhs)){}, Int<0>{});
auto flat_lhs = detail::coalesce_x(lhs, coprofile);
return detail::composition_impl(flat_lhs.shape(), flat_lhs.stride(), rhs.shape(), rhs.stride());
}
template <class LShape, class LStride, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
composition(Layout<LShape,LStride> const& lhs,
Tiler const& rhs)
{
if constexpr (is_tuple<Tiler>::value) {
static_assert(tuple_size<Tiler>::value <= Layout<LShape,LStride>::rank);
// Drop any modes of lhs that aren't hit by rhs
return detail::transform_layout(lhs, rhs, [](auto const& l, auto const& r) { return composition(l,r); }, make_seq<tuple_size<Tiler>::value>{}, seq<>{}, seq<>{});
} else if constexpr (is_underscore<Tiler>::value) {
return lhs;
} else if constexpr (is_integral<Tiler>::value) {
auto flat_lhs = detail::coalesce_x(lhs);
return detail::composition_impl(flat_lhs.shape(), flat_lhs.stride(), rhs, Int<1>{});
}
CUTE_GCC_UNREACHABLE;
}
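// Example (illustrative): R = composition(A, B) satisfies R(i) == A(B(i)) for all i in the domain of B.
//   composition(Layout<Shape<_4,_3>, Stride<_3,_1>>{},   // A = (_4,_3):(_3,_1)
//               Layout<_6,_2>{})                         // B = _6:_2
//     -> (_2,_3):(_6,_1)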
//
// Complement
//
// Build the complement of a layout.
// @post size(@a result) >= size(@a cotarget) / size(filter(@a layout))
// @post For all i in [1,size(@a result)),
//           @a result(i-1) < @a result(i)
//         For all j in [0, size(@a layout)),
//           @a result(i) != @a layout(j)
//
namespace detail {
// @pre @a layout has been filtered (flattened and no stride-0 or size-1 modes).
template <class Shape, class Stride, class CoTarget>
CUTE_HOST_DEVICE constexpr
auto
complement(Shape const& shape, Stride const& stride, CoTarget const& cotarget)
{
if constexpr (is_constant<0, Stride>::value) {
// Special case for irreducible rank-1 stride-0 layout
return make_layout(coalesce(cotarget));
} else {
// General case
constexpr int R = rank_v<Shape>;
static_assert(R == 1 || is_static<Stride>::value,
"Dynamic-stride complement only for rank-1 layouts");
// Should just be a sort and a fold...
// Then we could even handle dynamic strides (but they would destroy all static strides)
auto [shape_, stride_, result_shape_, result_stride] =
fold(make_seq<R-1>{},
cute::make_tuple(shape, stride, cute::make_tuple(), cute::make_tuple(Int<1>{})),
[](auto const& init, auto i)
{
auto [shape, stride, result_shape, result_stride] = init;
auto min_stride = cute::min(stride);
auto min_idx = cute::find(stride, min_stride);
auto new_shape = min_stride / get<i>(result_stride);
auto new_stride = min_stride * get<min_idx>(shape);
static_assert(not is_constant<0, decltype(new_shape)>::value, "Non-injective Layout detected in complement.");
return cute::make_tuple(remove<min_idx>(shape), // Remove the min_idx from shape
remove<min_idx>(stride), // Remove the min_idx from stride
append(result_shape , new_shape ), // new shape = min_stride / last_stride
append(result_stride, new_stride)); // new stride = min_stride * curr_shape
});
// Append the last shape mode
auto new_shape = get<0>(stride_) / get<R-1>(result_stride); // new shape = min_stride / last_stride
static_assert(not is_constant<0, decltype(new_shape)>::value, "Non-injective Layout detected in complement.");
auto result_shape = append(result_shape_, new_shape);
// Compute the rest_shape and rest_stride
auto new_stride = get<0>(stride_) * get<0>(shape_); // new stride = min_stride * curr_shape
auto rest_shape = coalesce(ceil_div(cotarget, new_stride));
auto rest_stride = compact_major<LayoutLeft>(rest_shape, new_stride);
// Coalesce and append (rest_shape, rest_stride)
return coalesce(make_layout(make_shape (result_shape , rest_shape ),
make_stride(result_stride, rest_stride)));
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
template <class Shape, class Stride, class CoTarget>
CUTE_HOST_DEVICE constexpr
auto
complement(Layout<Shape,Stride> const& layout, CoTarget const& cotarget)
{
auto filter_layout = filter(layout);
return detail::complement(filter_layout.shape(), filter_layout.stride(), shape(cotarget));
}
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
complement(Layout<Shape,Stride> const& layout)
{
auto filter_layout = filter(layout);
return detail::complement(filter_layout.shape(), filter_layout.stride(), cosize(filter_layout));
}
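// Example (illustrative): the complement "fills in" the rest of the codomain.
//   complement(Layout<_4,_1>{}, Int<24>{})
//     -> _6:_4   // make_layout(_4:_1, _6:_4) = (_4,_6):(_1,_4) covers [0,24) exactly once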
//
// Right-Inverse and Left-Inverse
//
namespace detail {
template <int NextStride, class Shape, class Stride, int... Is>
CUTE_HOST_DEVICE constexpr
auto
inverse_seq(Shape const& shape, Stride const& stride, seq<Is...>)
{
auto next_I = cute::find_if(stride, [](auto a) { return is_constant<NextStride, decltype(a)>{}; });
if constexpr (next_I == decltype(rank(stride))::value) {
// If not found, return current seq
return seq<Is...>{};
} else {
// auto next_stride = get<next_I>(shape) * get<next_I>(stride);
// NOTE: Needed for g++-7
using next_stride = decltype(get<next_I>(shape) * get<next_I>(stride));
if constexpr (is_static<next_stride>::value && !is_constant<NextStride, next_stride>::value) {
// If next_stride is static and unique, then continue
return inverse_seq<next_stride::value>(shape, stride, seq<Is..., next_I>{});
} else {
// Else return current seq + next_I
return seq<Is..., next_I>{};
}
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
//
// Build the right-inverse of a layout
// @pre is_static<Layout>
// @result A layout @a result such that
// @a layout(@a result(i)) == i for all i < size(@a result)
// @result A layout @a result such that
// composition(@a layout, @a result) is identical to make_layout(shape(result))
//
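// Example (illustrative):
//   right_inverse(Layout<Shape<_2,_4>, Stride<_4,_1>>{})   // (_2,_4):(_4,_1)
//     -> (_4,_2):(_2,_1)   // layout(result(i)) == i for all i < _8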
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
right_inverse(Layout<Shape,Stride> const& layout)
{
auto flat_layout = coalesce(layout);
auto astride = transform_leaf(flat_layout.stride(), abs_fn{});
// Find Int<1>{}, the starting stride, and follow the strides to gen inverse_seq
[[maybe_unused]] auto iseq = detail::inverse_seq<1>(flat_layout.shape(), astride, seq<>{});
if constexpr (iseq.size() == 0) {
return Layout<_1,_0>{}; // Empty case, nothing found
} else {
// Generate the corresponding new strides and construct
auto rstride = compact_major<LayoutLeft>(flat_layout.shape());
return make_layout(unwrap(transform(iseq, [&](auto i) { return shape<i>(flat_layout); })),
unwrap(transform(iseq, [&](auto i) { return signum(stride<i>(flat_layout)) * get<i>(rstride); })));
}
CUTE_GCC_UNREACHABLE;
}
CUTE_HOST_DEVICE constexpr
auto
right_inverse(Underscore const& _)
{
return _;
}
//
// Build the left-inverse of a layout
// @pre is_static<Layout>
// @pre @a layout is an injective function
// @result A layout @a result such that
// @a result(@a layout(i)) == i for all i < size(@a layout)
// @result A layout @a result such that
// composition(@a result, @a layout) is identical to make_layout(shape(layout))
//
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
left_inverse(Layout<Shape,Stride> const& layout)
{
return right_inverse(make_layout(layout, complement(layout)));
}
CUTE_HOST_DEVICE constexpr
auto
left_inverse(Underscore const& _)
{
return _;
}
//
// Max Common Layout
//
/* Return a layout that points to the maximum number of contiguous elements
* that logically correspond in the layouts of @a a and @a b.
*
* @returns Layout R
* @post For all 0 <= i < size(R), a(R(i)) == i and b(R(i)) == i
*/
template <class ShapeA, class StrideA,
class ShapeB, class StrideB>
CUTE_HOST_DEVICE constexpr
auto
max_common_layout(Layout<ShapeA,StrideA> const& a,
Layout<ShapeB,StrideB> const& b)
{
Layout inv_b = right_inverse(b);
Layout common = coalesce(composition(a, inv_b));
// Keep only the static identity component of the common layout
if constexpr (is_static<decltype(shape<0>(common))>::value &&
is_constant<1, decltype(stride<0>(common))>::value) {
// Truncate to the size of the contiguous vector (static stride-1 mode)
return composition(inv_b, layout<0>(common));
} else {
return Layout<_1,_0>{};
}
}
/* Return Int<N> such that N is the maximum number of contiguous elements
* that logically correspond in the layouts of @a a and @a b.
*
* @returns Int<N> with N >= 1
* @post For all 0 <= n < N, a(b.get_1d_coord(n)) == n
* (NOTE: Problems with negative strides/coords in this post-condition)
*/
template <class ShapeA, class StrideA,
class ShapeB, class StrideB>
CUTE_HOST_DEVICE constexpr
auto
max_common_vector(Layout<ShapeA,StrideA> const& a,
Layout<ShapeB,StrideB> const& b)
{
Layout common = coalesce(composition(a, right_inverse(b)));
// Keep only the static identity component of the common layout
if constexpr (is_static<decltype(shape<0>(common))>::value &&
is_constant<1, decltype(stride<0>(common))>::value) {
// Truncate to the size of the contiguous vector (static stride-1 mode)
return shape<0>(common);
} else {
return Int<1>{};
}
CUTE_GCC_UNREACHABLE;
}
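// Example (illustrative):
//   max_common_vector(Layout<Shape<_4,_8>, Stride<_1,_4>>{},
//                     Layout<Shape<_8,_4>, Stride<_1,_8>>{})   // both compact column-major
//     -> _32
//   max_common_vector(Layout<Shape<_4,_8>, Stride<_1,_4>>{},
//                     Layout<Shape<_4,_8>, Stride<_8,_1>>{})   // column-major vs row-major
//     -> _1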
/* Return a layout that distributes ShapeB over ShapeA.
*
* @returns Layout result
* @post softly_compatible(@a b, @a result)
* @post For all i,j in [0,size(@a result)) with i < j, @a result(i) < @a result(j). Surjective and Ordered.
* @post composition(make_layout(shape(@a a)), @a result) is admissible
* \code
* // Note that 6 does not divide this shape
* Layout layoutA = Layout<Shape<Int<15>,Int<14>>>{};
*
* // Want to tile any 6 elements and don't care where they come from
* Layout dist = domain_distribute(layoutA, Int<6>{}); // (_3,_2):(_1,_15)
*
* // Not guaranteed to find all 6 though...
* CUTE_STATIC_ASSERT_V(Int<6>{} == size(dist));
*
* Layout result = zipped_divide(layoutA, dist); // (_6,Rest)
* \endcode
*/
template <class ShapeA, class ShapeB>
CUTE_HOST_DEVICE constexpr
auto
domain_distribute(ShapeA const& a, ShapeB const& b)
{
static_assert(is_integral<ShapeB>::value);
static_assert(is_static<ShapeB>::value);
auto flat_shape_a = flatten(shape(a));
static_assert(is_static<decltype(flat_shape_a)>::value);
// Compute the shape of the result
auto [result_shape, b_rest] = cute::fold(flat_shape_a, cute::make_tuple(cute::tuple<>{}, size(b)), [](auto init, auto a_) {
auto [result, b_] = init;
auto gcd_ = gcd(a_, b_);
return cute::make_tuple(append(result, gcd_), b_ / gcd_);
});
// Compute the stride of the result
auto result_stride = compact_major<LayoutLeft>(flat_shape_a);
return coalesce(make_layout(result_shape, result_stride));
}
//
// Kernel (Nullspace) of a Layout
//
namespace detail {
template <int NextI, class Stride, int... Is>
CUTE_HOST_DEVICE constexpr
auto
nullspace_seq(Stride const& stride, seq<Is...>)
{
if constexpr (NextI == rank_v<Stride>) {
return seq<Is...>{};
} else
if constexpr (is_constant<0, decltype(get<NextI>(stride))>::value) {
return detail::nullspace_seq<NextI+1>(stride, seq<Is..., NextI>{});
} else {
return detail::nullspace_seq<NextI+1>(stride, seq<Is...>{});
}
CUTE_GCC_UNREACHABLE;
}
} // end namespace detail
//
// Build the nullspace of a layout
// @result A layout @a result such that
// size(@a result) == size(@a layout) / size(filter(@a layout))
// @a layout(@a result(i)) == 0 for all i < size(@a result)
//
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
nullspace(Layout<Shape,Stride> const& layout)
{
auto flat_layout = flatten(layout);
auto iseq = detail::nullspace_seq<0>(flat_layout.stride(), seq<>{});
if constexpr (iseq.size() == 0) {
return Layout<_1,_0>{}; // Empty case, nothing found
} else {
// Generate the corresponding new strides and construct
auto rstride = compact_major<LayoutLeft>(flat_layout.shape());
return make_layout(unwrap(transform(iseq, [&](auto i) { return shape<i>(flat_layout); })),
unwrap(transform(iseq, [&](auto i) { return get<i>(rstride); })));
}
CUTE_GCC_UNREACHABLE;
}
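// Example (illustrative):
//   nullspace(Layout<Shape<_4,_6>, Stride<_0,_1>>{})   // (_4,_6):(_0,_1)
//     -> _4:_1   // layout(result(i)) == 0 for all i < _4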
//
// Zip
//
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
zip(Layout<Shape,Stride> const& layout)
{
return make_layout(zip(layout.shape()),
zip(layout.stride()));
}
template <class TShape, class TStride,
class UShape, class UStride>
CUTE_HOST_DEVICE constexpr
auto
zip(Layout<TShape,TStride> const& layoutA,
Layout<UShape,UStride> const& layoutB)
{
return make_layout(zip(layoutA.shape(), layoutB.shape()),
zip(layoutA.stride(), layoutB.stride()));
}
//
// Tile unzip
// Logical product and logical divide (on layouts) produce rank-2 results by design.
// Follow the profile of @a tile and zip the rank-2 modes located at the terminals into
// their own mode.
//
template <class LShape, class LStride, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
tile_unzip(Layout<LShape,LStride> const& layout,
Tiler const& tiler)
{
return make_layout(zip2_by(layout.shape(), tiler),
zip2_by(layout.stride(), tiler));
}
//
// Logical divide
//
template <class LShape, class LStride,
class TShape, class TStride>
CUTE_HOST_DEVICE constexpr
auto
logical_divide(Layout<LShape,LStride> const& layout,
Layout<TShape,TStride> const& tiler)
{
return composition(layout, make_layout(tiler, complement(tiler, shape(layout))));
}
template <class LShape, class LStride, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
logical_divide(Layout<LShape,LStride> const& layout,
Tiler const& tiler)
{
if constexpr (is_tuple<Tiler>::value) {
static_assert(tuple_size<Tiler>::value <= Layout<LShape,LStride>::rank, "logical_divide: Too many modes in tiler.");
return transform_layout(layout, tiler, [](auto const& l, auto const& t) { return logical_divide(l,t); });
} else if constexpr (is_underscore<Tiler>::value) {
return layout;
} else if constexpr (is_integral<Tiler>::value) {
return logical_divide(layout, make_layout(tiler));
}
CUTE_GCC_UNREACHABLE;
}
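// Example (illustrative): dividing _8:_1 by the tile _2:_1 groups each pair of elements
// into mode-0 and leaves the 4 remaining tiles in mode-1.
//   logical_divide(Layout<_8,_1>{}, Layout<_2,_1>{})
//     -> (_2,_4):(_1,_2)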
// Generalization of ceil_div for Layout lhs
// is effectively the "rest mode" of logical_divide.
// Occurs in the calculation of gridDim, for example, for generalized tilers
// Example:
// dim3 gridDim(size(ceil_div(problem_shape_M, cta_tiler_M)),
// size(ceil_div(problem_shape_N, cta_tiler_N)));
// This does not consider compositional acceptance, so it may be the case that
// ceil_div produces a result while logical_divide (and friends) do not.
template <class Target, class TShape, class TStride>
CUTE_HOST_DEVICE constexpr
auto
ceil_div(Target const& target,
Layout<TShape,TStride> const& tiler)
{
return shape(complement(tiler, shape(target)));
}
//
// Convenience operator
// that produces layouts like ((BLK_A,BLK_B,...),(a,b,...,x,y))
// by gathering the tile modes and residuals into a rank-2 result.
//
template <class LShape, class LStride,
class Tiler>
CUTE_HOST_DEVICE constexpr
auto
zipped_divide(Layout<LShape,LStride> const& layout,
Tiler const& tiler)
{
return tile_unzip(logical_divide(layout, tiler), tiler);
}
// Same as zipped_divide, but unpacks the second mode: ((BLK_A,BLK_B,...),a,b,...,x,y)
template <class LShape, class LStride,
class Tiler>
CUTE_HOST_DEVICE constexpr
auto
tiled_divide(Layout<LShape,LStride> const& layout,
Tiler const& tiler)
{
auto result = zipped_divide(layout, tiler);
auto R1 = rank<1>(result);
return result(_, repeat<R1>(_));
}
// Same as zipped_divide, but unpacks both modes: (BLK_A,BLK_B,...,a,b,...,x,y)
template <class LShape, class LStride,
class Tiler>
CUTE_HOST_DEVICE constexpr
auto
flat_divide(Layout<LShape,LStride> const& layout,
Tiler const& tiler)
{
auto result = zipped_divide(layout, tiler);
auto R0 = rank<0>(result);
auto R1 = rank<1>(result);
return result(repeat<R0>(_), repeat<R1>(_));
}
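// Example (illustrative): tiling an (_8,_6) layout with a _4 x _2 tiler.
//   auto layout = make_layout(Shape<_8,_6>{});      // (_8,_6):(_1,_8)
//   auto tiler  = Shape<_4,_2>{};                   // tile _4 in mode-0, _2 in mode-1
//   zipped_divide(layout, tiler);   // ((_4,_2),(_2,_3)):((_1,_8),(_4,_16))
//   tiled_divide (layout, tiler);   // ((_4,_2),_2,_3):((_1,_8),_4,_16)
//   flat_divide  (layout, tiler);   // (_4,_2,_2,_3):(_1,_8,_4,_16)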
//
// Logical product
//
template <class LShape, class LStride,
class TShape, class TStride>
CUTE_HOST_DEVICE constexpr
auto
logical_product(Layout<LShape,LStride> const& block,
Layout<TShape,TStride> const& tiler)
{
return make_layout(block, composition(complement(block, size(block)*cosize(tiler)), tiler));
}
template <class LShape, class LStride, class Tiler>
CUTE_HOST_DEVICE constexpr
auto
logical_product(Layout<LShape,LStride> const& block,
Tiler const& tiler)
{
if constexpr (is_tuple<Tiler>::value) {
static_assert(tuple_size<Tiler>::value <= Layout<LShape,LStride>::rank, "logical_product: Too many modes in tiler.");
return transform_layout(block, tiler, [](auto const& l, auto const& t) { return logical_product(l,t); });
} else if constexpr (is_underscore<Tiler>::value) {
return block;
} else if constexpr (is_integral<Tiler>::value) {
return logical_product(block, make_layout(tiler));
}
CUTE_GCC_UNREACHABLE;
}
//
// Convenience operator
// that produces layouts like ((BLK_A,BLK_B,...),(a,b,...,x,y))
// by gathering the block modes and products into a rank-2 result.
//
template <class LShape, class LStride,
class Tiler>
CUTE_HOST_DEVICE constexpr
auto
zipped_product(Layout<LShape,LStride> const& block,
Tiler const& tiler)
{
return tile_unzip(logical_product(block, tiler), tiler);
}
// Same as zipped_product, but unpacks the second mode: ((BLK_A,BLK_B,...),a,b,...,x,y)
template <class LShape, class LStride,
class Tiler>
CUTE_HOST_DEVICE constexpr
auto
tiled_product(Layout<LShape,LStride> const& block,
Tiler const& tiler)
{
auto result = zipped_product(block, tiler);
auto R1 = rank<1>(result);
return result(_, repeat<R1>(_));
}
// Same as zipped_product, but unpacks both modes: (BLK_A,BLK_B,...,a,b,...,x,y)
template <class LShape, class LStride,
class Tiler>
CUTE_HOST_DEVICE constexpr
auto
flat_product(Layout<LShape,LStride> const& block,
Tiler const& tiler)
{
auto result = zipped_product(block, tiler);
auto R0 = rank<0>(result);
auto R1 = rank<1>(result);
return result(repeat<R0>(_), repeat<R1>(_));
}
//
// Rank-sensitive products
//
// blocked_product -- Reproduce a block over a tiler.
// Think of every element of "tiler" as a "block"
// and return the layout of the resulting structure.
// @post rank(@a result) == cute::max(rank(@a block), rank(@a tiler))
template <class TShape, class TStride,
class UShape, class UStride>
CUTE_HOST_DEVICE constexpr
auto
blocked_product(Layout<TShape,TStride> const& block,
Layout<UShape,UStride> const& tiler)
{
constexpr int R = cute::max(rank_v<TShape>, rank_v<UShape>);
auto result = logical_product(append<R>(block), append<R>(tiler));
return coalesce(zip(get<0>(result), get<1>(result)), tuple_repeat<R>(Int<1>{}));
}
// raked_product -- Reproduce a block over a tiler with block-interleaving.
// Think of every element of "tiler" as a "block", interleave those blocks,
// and return the layout of the resulting structure.
// @post rank(@a result) == cute::max(rank(@a block), rank(@a tiler))
template <class TShape, class TStride,
class UShape, class UStride>
CUTE_HOST_DEVICE constexpr
auto
raked_product(Layout<TShape,TStride> const& block,
Layout<UShape,UStride> const& tiler)
{
constexpr int R = cute::max(rank_v<TShape>, rank_v<UShape>);
auto result = logical_product(append<R>(block), append<R>(tiler));
return coalesce(zip(get<1>(result), get<0>(result)), tuple_repeat<R>(Int<1>{}));
}
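// Example (illustrative): repeating a (_2,_2) block over a (_3,_4) arrangement.
//   blocked_product(Layout<Shape<_2,_2>>{}, Layout<Shape<_3,_4>>{})
//     -> ((_2,_3),(_2,_4)):((_1,_4),(_2,_12))   // blocks stay contiguous
//   raked_product  (Layout<Shape<_2,_2>>{}, Layout<Shape<_3,_4>>{})
//     -> ((_3,_2),(_4,_2)):((_4,_1),(_12,_2))   // blocks are interleaved element-wise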
// tile_to_shape -- Perform a product of a layout so that the result matches a target shape.
// This is similar to blocked_product, but specifies the result shape instead of the
// product shape, which is more convenient in certain circumstances.
// @param block The layout to repeat
// @param trg_shape The target shape of the result
// @param ord_shape The order of the modes of @a trg_shape to tile @a layout with.
// Defaults to GenColMajor, so @a layout will repeat
// across the first mode first, the second mode second, etc
// E.g. Step<_2,_1,_3> will cause @a layout to repeat
// across the second mode first, the first mode second, and the third mode last.
// @pre rank(@a block) <= rank(@a trg_shape)
// @post compatible(@a trg_shape, shape(@a result))
template <class Shape, class Stride,
class TrgShape, class ModeOrder = LayoutLeft>
CUTE_HOST_DEVICE constexpr
auto
tile_to_shape(Layout<Shape,Stride> const& block,
TrgShape const& trg_shape,
ModeOrder const& ord_shape = {})
{
CUTE_STATIC_ASSERT_V(rank(block) <= rank(trg_shape), "Rank of layout must be <= rank of target shape.");
constexpr int R = rank_v<TrgShape>;
auto padded_block = append<R>(block);
auto block_shape = product_each(shape(padded_block));
auto target_shape = product_each(shape(trg_shape));
// Assert proper division
if constexpr (is_static<decltype(target_shape)>::value) {
CUTE_STATIC_ASSERT_V(weakly_compatible(block_shape, target_shape),
"tile_to_shape: block shape does not divide the target shape.");
}
auto product_shape = ceil_div(target_shape, block_shape);
return coalesce(blocked_product(padded_block, make_ordered_layout(product_shape, ord_shape)), product_shape);
}
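// Illustrative note (editor's sketch): tiling a (2,2) block to a target shape (8,6)
// computes product_shape = ceil_div((8,6),(2,2)) = (4,3) and repeats the block over
// that grid. With the default LayoutLeft order the repetition advances along the first
// mode first; an order such as Step<_2,_1> would advance the second mode first.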
//
// Upcast
// For stride-1 mode, divide size by N. Divide all other strides by N.
//
template <int N, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
upcast(Shape const& shape, Stride const& stride)
{
if constexpr (is_tuple<Shape>::value) { // tuple stride
return transform_layout(shape, stride, [](auto const& s, auto const& d) { return upcast<N>(s,d); });
} else if constexpr (is_constant<0, Stride>::value) { // static-0 stride
return Layout<Shape,Stride>{shape,stride};
} else if constexpr (is_static<Stride>::value) { // static stride
return make_layout(shape_div(shape, shape_div(Int<N>{}, abs(stride))),
shape_div(stride, Int<N>{}));
} else { // dynamic stride
// assume dynamic strides are larger than N and divisible
// assert(stride % N == 0);
return make_layout(shape, safe_div(stride, Int<N>{}));
}
CUTE_GCC_UNREACHABLE;
}
template <int N, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
upcast(Layout<Shape,Stride> const& layout)
{
return upcast<N>(layout.shape(), layout.stride());
}
//
// Downcast
// For stride-1 mode, multiply size by N. Multiply all other strides by N.
//
template <int N, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
downcast(Shape const& shape, Stride const& stride)
{
if constexpr (is_tuple<Shape>::value) {
return transform_layout(shape, stride, [](auto const& s, auto const& d) { return downcast<N>(s,d); });
} else if constexpr (is_constant<1, Stride>::value || is_constant<-1, Stride>::value) {
return make_layout(shape * Int<N>{}, stride);
} else {
return make_layout(shape, stride * Int<N>{});
}
CUTE_GCC_UNREACHABLE;
}
template <int N, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
downcast(Layout<Shape,Stride> const& layout)
{
CUTE_STATIC_ASSERT(has_int1<Stride>::value, "Downcast requires adjacent elements");
return downcast<N>(layout.shape(), layout.stride());
}
//
// Recast
//
template <class OldType, class NewType,
class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
recast_layout(Layout<Shape,Stride> const& layout)
{
using scale = decltype(trait_ratio(sizeof_bits<NewType>{}, sizeof_bits<OldType>{}));
if constexpr (scale::num == 1 && scale::den == 1) {
return layout;
}
else if constexpr (scale::num == 1) {
return downcast<scale::den>(layout);
}
else if constexpr (scale::den == 1) {
return upcast<scale::num>(layout);
}
else {
static_assert(dependent_false<scale>, "Recast not supported.");
}
CUTE_GCC_UNREACHABLE;
}
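// Illustrative note (editor's sketch): recasting a layout of 8-bit elements to 32-bit
// elements gives scale = 4/1, so recast_layout<uint8_t, uint32_t> applies upcast<4>
// (the stride-1 mode shrinks by 4x); the reverse direction gives scale = 1/4 and
// applies downcast<4> (the stride-1 mode grows by 4x).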
// Determine the maximum alignment of a Layout.
// The maximum alignment is the largest N for which upcast<N>(layout) will compile.
// upcast<N>(layout) compiles when the static shapes and strides pass divisibility checks.
// Therefore, upcast<M>(layout) will also compile for all divisors M of N.
// Note that this only considers the static shapes and strides of the Layout,
// mirroring upcast<N>, which also checks only static shapes and strides and assumes
// all dynamic shapes and strides are large multiples of N.
template <class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
max_alignment(Layout<Shape,Stride> const& layout)
{
auto flat_layout = coalesce(layout);
auto static_shape = transform( shape(flat_layout), [](auto s){ return conditional_return<is_static<decltype(s)>::value>(s, Int<1>{}); });
auto static_stride = transform(stride(flat_layout), [](auto d){ return conditional_return<is_static<decltype(d)>::value>(d, Int<0>{}); });
auto filter_layout = make_layout(static_shape, static_stride);
auto permuted = logical_divide(filter_layout, right_inverse(filter_layout));
return gcd(size<0>(permuted), stride<1>(permuted));
}
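// Illustrative note (editor's sketch): for a fully static, contiguous layout such as
// Layout<_16,_1>, the coalesced layout is 16:1 and the result is 16, so upcast<16>
// (and any divisor of 16) compiles. Dynamic shapes and strides are replaced by 1 and 0
// above, so only the static components influence the reported alignment.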
//
// Display utilities
//
template <class Shape, class Stride>
CUTE_HOST_DEVICE void print(Layout<Shape,Stride> const& layout)
{
print(layout.shape()); print(":"); print(layout.stride());
}
#if !defined(__CUDACC_RTC__)
template <class Shape, class Stride>
CUTE_HOST std::ostream& operator<<(std::ostream& os, Layout<Shape,Stride> const& layout)
{
return os << shape(layout) << ":" << stride(layout);
}
#endif
// Generic 2D Layout to console table
template <class Layout>
CUTE_HOST_DEVICE
void
print_layout(Layout const& layout) // (m,n) -> idx
{
CUTE_STATIC_ASSERT_V(rank(layout) == Int<2>{});
int idx_width = num_digits(cosize(layout)) + 2;
const char* delim = "+-----------------------";
print(layout); print("\n");
// Column indices
print(" ");
for (int n = 0; n < size<1>(layout); ++n) { printf(" %*d ", idx_width-2, n); }
printf("\n");
// Print out A m-by-n
for (int m = 0; m < size<0>(layout); ++m) {
// Header
print(" ");
for (int n = 0; n < size<1>(layout); ++n) { printf("%.*s", idx_width+1, delim); }
printf("+\n");
// Values
printf("%2d ", m); // Row indices
for (int n = 0; n < size<1>(layout); ++n) { printf("| %*d ", idx_width-2, int(layout(m,n))); }
printf("|\n");
}
// Footer
print(" ");
for (int n = 0; n < size<1>(layout); ++n) { printf("%.*s", idx_width+1, delim); }
printf("+\n");
}
// Generic ThrVal 2D Layout to console table
template <class Layout, class ThrID>
CUTE_HOST_DEVICE
void
print_layout(Layout const& layout, ThrID const& thrid) // (m,n) -> (tid,vid) and tid -> thr_idx
{
CUTE_STATIC_ASSERT_V(rank(layout) == Int<2>{});
print(layout); print("\n");
print(thrid); print("\n");
// Print out m-by-n
for (int m = 0; m < size<0>(layout); ++m) {
// Header
for (int n = 0; n < size<1>(layout); ++n) printf("+------");
printf("+\n");
// Values
for (int n = 0; n < size<1>(layout); ++n) printf("|%03d-%02d", int(thrid(layout(m,n) % size(thrid))), int(layout(m,n) / size(thrid)));
printf("|\n");
}
// Footer
for (int n = 0; n < size<1>(layout); ++n) printf("+------");
printf("+\n");
}
// Generic 2D Layout to Latex printer -- B&W 8-value color coding
template <class LayoutA>
CUTE_HOST_DEVICE
void
print_latex(LayoutA const& layout_a)
{
CUTE_STATIC_ASSERT_V(rank(layout_a) <= Int<2>{});
auto layout = append<2>(layout_a, Layout<_1,_0>{});
char const* latex_header =
"\\documentclass[convert]{standalone}\n"
"\\usepackage{tikz}\n\n"
"\\begin{document}\n"
"\\begin{tikzpicture}[x={(0cm,-1cm)},y={(1cm,0cm)},box/.style={rectangle,draw=black,thick,minimum size=1cm,anchor=center,font=\\Large}]\n\n";
char const* latex_footer =
"\\end{tikzpicture}\n"
"\\end{document}\n";
char const* color_map[8] = {"black!00",
"black!40",
"black!20",
"black!60",
"black!10",
"black!50",
"black!30",
"black!70"};
// Header
printf("%% Layout: "); print(layout); printf("\n");
printf(latex_header);
// Layout
for (int i = 0; i < size<0>(layout); ++i) {
for (int j = 0; j < size<1>(layout); ++j) {
int idx = layout(i,j);
printf("\\node[box,fill=%s] at (%d,%d) {%d};\n",
color_map[idx % 8],
i, j,
idx);
}
}
// Labels
for (int i = 0, j = -1; i < size<0>(layout); ++i) {
printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, i);
}
for (int j = 0, i = -1; j < size<1>(layout); ++j) {
printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, j);
}
// Footer
printf(latex_footer);
}
// Generic ThrVal 2D Layout to Latex TIKZ -- 8-value color coded by thread
template <class Layout, class ThrID>
CUTE_HOST_DEVICE
void
print_latex(Layout const& layout, ThrID const& thr) // (m,n) -> (tid,vid) and tid -> thr_idx
{
CUTE_STATIC_ASSERT_V(rank(layout) == Int<2>{});
char const* latex_header =
"\\documentclass[convert]{standalone}\n"
"\\usepackage{tikz}\n\n"
"\\begin{document}\n"
"\\begin{tikzpicture}[x={(0cm,-1cm)},y={(1cm,0cm)},box/.style={rectangle,draw=black,thick,minimum size=1cm,anchor=center}]\n\n";
char const* latex_footer =
"\\end{tikzpicture}\n"
"\\end{document}\n";
char const* color_map[8] = {"{rgb,255:red,175;green,175;blue,255}",
"{rgb,255:red,175;green,255;blue,175}",
"{rgb,255:red,255;green,255;blue,175}",
"{rgb,255:red,255;green,175;blue,175}",
"{rgb,255:red,210;green,210;blue,255}",
"{rgb,255:red,210;green,255;blue,210}",
"{rgb,255:red,255;green,255;blue,210}",
"{rgb,255:red,255;green,210;blue,210}"};
// Header
printf("%% layout: "); print(layout); printf("\n");
printf("%% thrid: "); print(thr); printf("\n\n");
printf(latex_header);
// Layout
for (int i = 0; i < size<0>(layout); ++i) {
for (int j = 0; j < size<1>(layout); ++j) {
int thrid = layout(i,j) % size(thr);
int val_idx = layout(i,j) / size(thr);
int thr_idx = thr(thrid);
printf("\\node[box,fill=%s] at (%d,%d) {\\shortstack{T%d \\\\ V%d}};\n",
color_map[thr_idx % 8],
i, j,
thr_idx, val_idx);
}
}
// Labels
for (int i = 0, j = -1; i < size<0>(layout); ++i) {
printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, i);
}
for (int j = 0, i = -1; j < size<1>(layout); ++j) {
printf("\\node at (%d,%d) {\\Large{\\texttt{%d}}};\n", i, j, j);
}
// Footer
printf(latex_footer);
}
} // end namespace cute
//
// Extended Layouts
//
#include <cute/swizzle_layout.hpp>
| include/cute/layout.hpp/0 | {
"file_path": "include/cute/layout.hpp",
"repo_id": "include",
"token_count": 25776
} | 15 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/container/tuple.hpp>
#include <cute/algorithm/tuple_algorithms.hpp>
#include <cute/numeric/integer_sequence.hpp>
#include <cute/numeric/integral_constant.hpp>
#include <cute/numeric/math.hpp>
namespace cute
{
// A generic Swizzle functor
/* 0bxxxxxxxxxxxxxxxYYYxxxxxxxZZZxxxx
* ^--^ MBase is the number of least-sig bits to keep constant
* ^-^ ^-^ BBits is the number of bits in the mask
* ^---------^ SShift is the distance to shift the YYY mask
* (pos shifts YYY to the right, neg shifts YYY to the left)
*
* e.g. Given
* 0bxxxxxxxxxxxxxxxxYYxxxxxxxxxZZxxx
* the result is
* 0bxxxxxxxxxxxxxxxxYYxxxxxxxxxAAxxx where AA = ZZ xor YY
*/
template <int BBits, int MBase, int SShift = BBits>
struct Swizzle
{
static constexpr int num_bits = BBits;
static constexpr int num_base = MBase;
static constexpr int num_shft = SShift;
  static_assert(num_base >= 0,             "MBase must be non-negative.");
  static_assert(num_bits >= 0,             "BBits must be non-negative.");
  static_assert(abs(num_shft) >= num_bits, "abs(SShift) must be at least BBits.");
  // using 'int' type here to avoid unintentionally casting to unsigned... unsure.
using bit_msk = cute::constant<int, (1 << num_bits) - 1>;
using yyy_msk = cute::constant<int, bit_msk{} << (num_base + max(0,num_shft))>;
using zzz_msk = cute::constant<int, bit_msk{} << (num_base - min(0,num_shft))>;
using msk_sft = cute::constant<int, num_shft>;
static constexpr uint32_t swizzle_code = uint32_t(yyy_msk{} | zzz_msk{});
template <class Offset>
CUTE_HOST_DEVICE constexpr static
auto
apply(Offset const& offset)
{
return offset ^ shiftr(offset & yyy_msk{}, msk_sft{}); // ZZZ ^= YYY
}
template <class Offset>
CUTE_HOST_DEVICE constexpr
auto
operator()(Offset const& offset) const
{
return apply(offset);
}
template <int B, int M, int S>
CUTE_HOST_DEVICE constexpr
auto
operator==(Swizzle<B,M,S> const&) const
{
return B == BBits && M == MBase && S == SShift;
}
};
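// Illustrative note (editor's sketch): Swizzle<2,3,3> has bit_msk = 0b11,
// yyy_msk = 0b11000000, and zzz_msk = 0b00011000, so
//   apply(0b01001000) = 0b01001000 ^ ((0b01001000 & 0b11000000) >> 3) = 0b01000000,
// i.e. the ZZZ bits [4:3] are XORed with the YYY bits [7:6] shifted down by SShift = 3.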
//
// make_swizzle<0b1000, 0b0100>() -> Swizzle<1,2,1>
// make_swizzle<0b11000000, 0b00000110>() -> Swizzle<2,1,5>
//
template <uint32_t Y, uint32_t Z>
CUTE_HOST_DEVICE constexpr
auto
make_swizzle()
{
constexpr uint32_t BZ = popcount(Y); // Number of swizzle bits
constexpr uint32_t BY = popcount(Z); // Number of swizzle bits
static_assert(BZ == BY, "Number of bits in Y and Z don't match");
constexpr uint32_t TZ_Y = countr_zero(Y); // Number of trailing zeros in Y
constexpr uint32_t TZ_Z = countr_zero(Z); // Number of trailing zeros in Z
constexpr uint32_t M = cute::min(TZ_Y, TZ_Z) % 32;
constexpr int32_t S = int32_t(TZ_Y) - int32_t(TZ_Z); // Difference in trailing zeros
static_assert((Y | Z) == Swizzle<BZ,M,S>::swizzle_code, "Something went wrong.");
return Swizzle<BZ,M,S>{};
}
template <int B0, int M0, int S0,
int B1, int M1, int S1>
CUTE_HOST_DEVICE constexpr
auto
composition(Swizzle<B0,M0,S0>, Swizzle<B1,M1,S1>)
{
static_assert(S0 == S1, "Can only merge swizzles of the same shift.");
constexpr uint32_t Y = Swizzle<B0,M0,S0>::yyy_msk::value ^ Swizzle<B1,M1,S1>::yyy_msk::value;
constexpr uint32_t Z = Swizzle<B0,M0,S0>::zzz_msk::value ^ Swizzle<B1,M1,S1>::zzz_msk::value;
return make_swizzle<Y,Z>();
//return ComposedFn<Swizzle<B0,M0,S0>, Swizzle<B1,M1,S1>>{};
}
//
// Utility for slicing and swizzle "offsets"
//
// For swizzle functions, it is often needed to keep track of which bits are
// consumed and which bits are free. Furthermore, it is useful to know whether
// each of these bits is known statically or dynamically.
// MixedBits is a 32-bit unsigned integer class in which some bits are known statically
// and some are known only dynamically. The two sets of bits are disjoint, and which
// bits are dynamic is itself known statically.
// MixedBits can only be manipulated through bitwise operations.
// Abstract value: StaticInt | (dynamic_int_ & StaticFlags)
template <uint32_t StaticInt,
uint32_t StaticFlags> // 0: static, 1: dynamic
struct MixedBits
{
// Representation invariants
static_assert(StaticFlags != 0, "Should be at least one dynamic bit in MixedBits.");
static_assert((StaticInt & StaticFlags) == 0, "No static/dynamic overlap allowed in MixedBits.");
uint32_t dynamic_int_;
// assert((dynamic_int_ & ~StaticFlags) == 0);
CUTE_HOST_DEVICE constexpr operator uint32_t() const noexcept { return StaticInt | dynamic_int_; }
};
// Return a value representing (C<s>{} | (d & C<f>)) potentially using MixedBits to track s and f.
// This maker does allow ((s & f) != 0) and enforces the MixedBits invariant before creation.
template <auto s, class DynamicType, auto f>
CUTE_HOST_DEVICE constexpr
auto
make_mixed_bits(C<s>, DynamicType const& d, C<f>)
{
static_assert(is_integral<DynamicType>::value);
constexpr uint32_t new_f = uint32_t(f) & ~uint32_t(s); // StaticBits take precedence, M<0,f>{d} | C<s>{}
if constexpr (new_f == 0 || is_static<DynamicType>::value) {
return C<s>{} | (d & C<new_f>{}); // Just return a static int
} else {
return MixedBits<s, new_f>{uint32_t(d) & new_f}; // MixedBits
}
CUTE_GCC_UNREACHABLE;
}
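// Illustrative note (editor's sketch): make_mixed_bits(C<0b0100>{}, d, C<0b0011>{})
// with a dynamic integer d yields MixedBits<0b0100,0b0011>{uint32_t(d) & 0b0011},
// whose abstract value is 0b0100 | (d & 0b0011). If d were itself a static C<...>,
// the result would instead fold to a plain static integer.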
//
// Operators
//
// Equality
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator==(MixedBits<S0,F0> const& m, C<S1>)
{
return (S0 == (uint32_t(S1) & ~F0)) && (m.dynamic_int_ == (uint32_t(S1) & F0));
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator==(C<S1> s, MixedBits<S0,F0> const& m)
{
return m == s;
}
// Bitwise AND
template <uint32_t S0, uint32_t F0,
uint32_t S1, uint32_t F1>
CUTE_HOST_DEVICE constexpr
auto
operator&(MixedBits<S0,F0> const& m0, MixedBits<S1,F1> const& m1)
{
// Truth table for (S0,D0,F0) & (S1,D1,F1) -> (S,D,F)
// S0D0F0 | 0X0 | 001 | 011 | 1X0 |
// S1D1F1
// 0X0 | 0X0 | 0X0 | 0X0 | 0X0 |
// 001 | 0X0 | 001 | 001 | 001 |
// 011 | 0X0 | 001 | 011 | 011 |
// 1X0 | 0X0 | 001 | 011 | 1X0 |
return make_mixed_bits(C<S0 & S1>{},
//(S0 | m0.dynamic_int_) & (S1 | m1.dynamic_int_),
((S1 & F0) & m0.dynamic_int_) | ((S0 & F1) & m1.dynamic_int_) | (m0.dynamic_int_ & m1.dynamic_int_),
C<(S1 & F0) | (S0 & F1) | (F0 & F1)>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator&(MixedBits<S0,F0> const& m, C<S1>)
{
return make_mixed_bits(C<S0 & uint32_t(S1)>{},
m.dynamic_int_,
C<F0 & uint32_t(S1)>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator&(C<S1> s, MixedBits<S0,F0> const& m)
{
return m & s;
}
// Bitwise OR
template <uint32_t S0, uint32_t F0,
uint32_t S1, uint32_t F1>
CUTE_HOST_DEVICE constexpr
auto
operator|(MixedBits<S0,F0> const& m0, MixedBits<S1,F1> const& m1)
{
// Truth table for (S0,D0,F0) | (S1,D1,F1) -> (S,D,F)
// S0D0F0 | 0X0 | 001 | 011 | 1X0 |
// S1D1F1
// 0X0 | 0X0 | 001 | 011 | 1X0 |
// 001 | 001 | 001 | 011 | 1X0 |
// 011 | 011 | 011 | 011 | 1X0 |
// 1X0 | 1X0 | 1X0 | 1X0 | 1X0 |
return make_mixed_bits(C<S0 | S1>{},
((~S1 & F0) & m0.dynamic_int_) | ((~S0 & F1) & m1.dynamic_int_),
C<(~S0 & F1) | (~S1 & F0)>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator|(MixedBits<S0,F0> const& m, C<S1>)
{
return make_mixed_bits(C<S0 | uint32_t(S1)>{},
m.dynamic_int_,
C<F0 & ~uint32_t(S1)>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator|(C<S1> s, MixedBits<S0,F0> const& m)
{
return m | s;
}
// Bitwise XOR
template <uint32_t S0, uint32_t F0,
uint32_t S1, uint32_t F1>
CUTE_HOST_DEVICE constexpr
auto
operator^(MixedBits<S0,F0> const& m0, MixedBits<S1,F1> const& m1)
{
// Truth table for (S0,D0,F0) ^ (S1,D1,F1) -> (S,D,F)
// S0D0F0 | 0X0 | 001 | 011 | 1X0 |
// S1D1F1
// 0X0 | 0X0 | 001 | 011 | 1X0 |
// 001 | 001 | 001 | 011 | 011 |
// 011 | 011 | 011 | 001 | 001 |
// 1X0 | 1X0 | 011 | 001 | 0X0 |
return make_mixed_bits(C<(~S0 & S1 & ~F0) | (S0 & ~S1 & ~F1)>{},
(S0 | m0.dynamic_int_) ^ (S1 | m1.dynamic_int_),
C<F0 | F1>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator^(MixedBits<S0,F0> const& m, C<S1>)
{
return make_mixed_bits(C<(~S0 & uint32_t(S1) & ~F0) | (S0 & ~uint32_t(S1))>{},
(S0 | m.dynamic_int_) ^ uint32_t(S1),
C<F0>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator^(C<S1> s, MixedBits<S0,F0> const& m)
{
return m ^ s;
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator<<(MixedBits<S0,F0> const& m, C<S1>)
{
return make_mixed_bits(C<(S0 << S1)>{},
m.dynamic_int_ << S1,
C<(F0 << S1)>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
operator>>(MixedBits<S0,F0> const& m, C<S1>)
{
return make_mixed_bits(C<(S0 >> S1)>{},
m.dynamic_int_ >> S1,
C<(F0 >> S1)>{});
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
shiftl(MixedBits<S0,F0> const& m, C<S1> s)
{
if constexpr (S1 >= 0) {
return m << s;
} else {
return m >> -s;
}
}
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
shiftr(MixedBits<S0,F0> const& m, C<S1> s)
{
if constexpr (S1 >= 0) {
return m >> s;
} else {
return m << -s;
}
}
//
// Upcast and Downcast
//
template <uint32_t S0, uint32_t F0, auto S1>
CUTE_HOST_DEVICE constexpr
auto
safe_div(MixedBits<S0,F0> const& m, C<S1> s)
{
static_assert(has_single_bit(uint32_t(S1)), "Only divide MixedBits by powers of two.");
return make_mixed_bits(safe_div(C<S0>{}, s),
safe_div(m.dynamic_int_, s),
safe_div(C<F0>{}, s));
}
template <uint32_t N, uint32_t S0, uint32_t F0>
CUTE_HOST_DEVICE constexpr
auto
upcast(MixedBits<S0,F0> const& m)
{
static_assert(has_single_bit(N), "Only divide MixedBits by powers of two.");
return safe_div(m, C<N>{});
}
template <uint32_t N, class T, __CUTE_REQUIRES(cute::is_integral<T>::value)>
CUTE_HOST_DEVICE constexpr
auto
upcast(T const& m)
{
return safe_div(m, C<N>{});
}
template <uint32_t N, uint32_t S0, uint32_t F0>
CUTE_HOST_DEVICE constexpr
auto
downcast(MixedBits<S0,F0> const& m)
{
static_assert(has_single_bit(N), "Only scale MixedBits by powers of two.");
return make_mixed_bits(C<S0 * N>{},
m.dynamic_int_ * N,
C<F0 * N>{});
}
template <uint32_t N, class T, __CUTE_REQUIRES(cute::is_integral<T>::value)>
CUTE_HOST_DEVICE constexpr
auto
downcast(T const& m)
{
return m * C<N>{};
}
template <uint32_t S0, uint32_t F0>
CUTE_HOST_DEVICE constexpr
auto
max_alignment(MixedBits<S0,F0> const&)
{
return C<uint32_t(1) << countr_zero(S0 | F0)>{};
}
template <auto v>
CUTE_HOST_DEVICE constexpr
C<v>
max_alignment(C<v> const& c)
{
return c;
}
//
// Convert a Pow2Layout+Coord to a MixedBits
//
template <class Shape, class Stride, class Coord>
CUTE_HOST_DEVICE constexpr
auto
to_mixed_bits(Shape const& shape, Stride const& stride, Coord const& coord)
{
if constexpr (is_tuple<Shape>::value && is_tuple<Stride>::value && is_tuple<Coord>::value) {
static_assert(tuple_size<Shape>::value == tuple_size<Stride>::value, "Mismatched ranks");
static_assert(tuple_size<Shape>::value == tuple_size<Coord >::value, "Mismatched ranks");
return transform_apply(shape, stride, coord, [](auto const& s, auto const& d, auto const& c) { return to_mixed_bits(s,d,c); },
[](auto const&... a) { return (a ^ ...); });
} else if constexpr (is_integral<Shape>::value && is_integral<Stride>::value && is_integral<Coord>::value) {
static_assert(decltype(shape*stride)::value == 0 || has_single_bit(decltype(shape*stride)::value), "Requires pow2 shape*stride.");
return make_mixed_bits(Int<0>{}, coord * stride, (shape - Int<1>{}) * stride);
} else {
static_assert(is_integral<Shape>::value && is_integral<Stride>::value && is_integral<Coord>::value, "Either Shape, Stride, and Coord must be all tuples, or they must be all integral (in the sense of cute::is_integral).");
}
CUTE_GCC_UNREACHABLE;
}
template <class Layout, class Coord>
CUTE_HOST_DEVICE constexpr
auto
to_mixed_bits(Layout const& layout, Coord const& coord)
{
return to_mixed_bits(layout.shape(), layout.stride(), idx2crd(coord, layout.shape()));
}
//
// Display utilities
//
template <int B, int M, int S>
CUTE_HOST_DEVICE void print(Swizzle<B,M,S> const&)
{
printf("Sw<%d,%d,%d>", B, M, S);
}
template <uint32_t S, uint32_t F>
CUTE_HOST_DEVICE void print(MixedBits<S,F> const& m)
{
printf("M_%u|(%u&%u)=%u", S, m.dynamic_int_, F, uint32_t(m));
}
#if !defined(__CUDACC_RTC__)
template <int B, int M, int S>
CUTE_HOST std::ostream& operator<<(std::ostream& os, Swizzle<B,M,S> const&)
{
return os << "Sw<" << B << "," << M << "," << S << ">";
}
template <uint32_t S, uint32_t F>
CUTE_HOST std::ostream& operator<<(std::ostream& os, MixedBits<S,F> const& m)
{
return os << "M_" << S << "|(" << m.dynamic_int_ << "&" << F << ")=" << uint32_t(m);
}
#endif // !defined(__CUDACC_RTC__)
} // end namespace cute
| include/cute/swizzle.hpp/0 | {
"file_path": "include/cute/swizzle.hpp",
"repo_id": "include",
"token_count": 7127
} | 16 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates exposing architecture support for multiply-add operations
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/arch.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the operation implied by MMA.
struct OpMultiplyAdd {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the result is saturated to MAX_FLOAT|MIN_FLOAT or MAX_INT|MIN_INT
struct OpMultiplyAddSaturate {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to a narrower type (BF16)
struct OpMultiplyAddFastBF16 {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to a narrower type (F16)
struct OpMultiplyAddFastF16 {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input data types are mixed and the narrower type is
/// upcasted to the wider type
struct OpMultiplyAddMixedInputUpcast {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to 2 (big and small) TF32 components
// Perform 3xTF32 or 4xTF32 for every F32 output element
struct OpMultiplyAddFastF32 {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the input is converted to 2 (big and small) TF32 components
// Perform 3xTF32 or 4xTF32 for every complex<F32> output element
struct OpMultiplyAddComplexFastF32 {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating that staged accumulation is not to be used. This is valid only for SM89
/// FP8 kernels.
struct OpMultiplyAddFastAccum;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the complex multiply-add operation
struct OpMultiplyAddComplex {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the gaussian complex multiply-add operation
struct OpMultiplyAddGaussianComplex {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the inner product is defined by (XOR, POPC)
struct OpXorPopc {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag indicating the inner product is defined by (AND, POPC)
struct OpAndPopc {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifying math operators as thread-level operations.
struct OpClassSimt {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifying operators as Tensor Core operations.
struct OpClassTensorOp {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifying operators as WMMA Tensor Core operations
struct OpClassWmmaTensorOp {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Tag classifying operators as Tensor Core with structure sparse operations.
struct OpClassSparseTensorOp {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Number of threads participating
int kThreads_,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Inner product operator
typename Operator
>
struct Mma;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation - specialized for 1x1x1x1 matrix multiply operation
template <
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Inner product operator
typename Operator_
>
struct Mma<gemm::GemmShape<1, 1, 1>, 1, ElementA, LayoutA, ElementB, LayoutB, ElementC_, LayoutC, Operator_> {
using Shape = gemm::GemmShape<1, 1, 1>;
using Operator = Operator_;
using ElementC = ElementC_;
CUTLASS_HOST_DEVICE
void operator()(
Array<ElementC, 1> &d,
Array<ElementA, 1> const &a,
Array<ElementB, 1> const &b,
Array<ElementC, 1> const &c
) {
multiply_add<ElementA, ElementB, ElementC> op;
d[0] = op(a[0], b[0], c[0]);
}
};
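// Illustrative usage (editor's sketch; the element types and layouts below are
// hypothetical choices, not mandated by this header):
//
//   using ThreadMma = cutlass::arch::Mma<
//       cutlass::gemm::GemmShape<1, 1, 1>, 1,
//       float, cutlass::layout::RowMajor,
//       float, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
//   cutlass::Array<float, 1> d, a, b, c;
//   ThreadMma{}(d, a, b, c);   // d[0] = a[0] * b[0] + c[0]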
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specifies internal data type for computation
struct SPFormatType {
enum Kind {
Thread
};
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Matrix multiply-add operation
template <
/// Size of the matrix product (concept: GemmShape)
typename Shape_,
/// Number of threads participating
int kThreads_,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Inner product operator
typename Operator,
/// Specifies meta data format
SPFormatType::Kind SPFormat = SPFormatType::Thread
>
struct SparseMma;
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Specializations for each compute capability
//
#include "cutlass/arch/mma_sm50.h"
#include "cutlass/arch/mma_sm60.h"
#include "cutlass/arch/mma_sm61.h"
#include "cutlass/arch/mma_sm70.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/arch/mma_sparse_sm80.h"
#include "cutlass/arch/mma_sm89.h"
#include "cutlass/arch/mma_sparse_sm89.h"
#include "cutlass/arch/mma_sm90.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
namespace detail {
/// Helper for determining whether staged accumulation should be used for a given operator
template <typename Operator>
struct UseStagedAccumulation {
static bool const value = platform::is_same<typename Operator::MathOperator, OpMultiplyAddFastF32>::value ||
platform::is_same<typename Operator::MathOperator, OpMultiplyAddComplexFastF32>::value ||
is_sm89_staged_policy_v<Operator>;
};
} // namespace detail
} // namespace arch
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/arch/mma.h/0 | {
"file_path": "include/cutlass/arch/mma.h",
"repo_id": "include",
"token_count": 2356
} | 17 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Matrix multiply
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <assert.h>
#endif
#include "cutlass/layout/matrix.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace arch {
////////////////////////////////////////////////////////////////////////////////
//
// The WMMA template structure defines nvcuda::wmma::fragment types and static asserts
// for the wmma native instruction shapes supported for half-precision multiplicands
//
////////////////////////////////////////////////////////////////////////////////
template <
typename Shape_,
typename LayoutA_,
typename LayoutB_,
typename ElementC_,
typename LayoutC_>
struct Wmma<
Shape_, ///< Size of the matrix product (concept: GemmShape)
cutlass::half_t, ///< ElementA
LayoutA_, ///< LayoutA
cutlass::half_t, ///< ElementB
LayoutB_, ///< LayoutB
ElementC_, ///< ElementC
LayoutC_, ///< LayoutC
cutlass::arch::OpMultiplyAdd ///< Operator (multiply-add, xor.popc)
> {
#if defined(CUTLASS_ARCH_WMMA_SM70_ENABLED)
using Shape = Shape_;
using ElementA = cutlass::half_t;
using LayoutA = LayoutA_;
using ElementB = cutlass::half_t;
using LayoutB = LayoutB_;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using Operator = cutlass::arch::OpMultiplyAdd;
using ArchTag = arch::Sm70;
// check supported wmma shape for the given multiplicand data types
static_assert(
platform::is_same<cutlass::gemm::GemmShape<16, 16, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape< 8, 32, 16>, Shape>::value ||
platform::is_same<cutlass::gemm::GemmShape<32, 8, 16>, Shape>::value,
"Supported list of wmma operator shape for f16 multiplicands are: 16x16x16, 8x32x16, and 32x8x16");
// check supported wmma output data type for the given multiplicand data types
static_assert(
platform::is_same<cutlass::half_t, ElementC>::value || platform::is_same<float, ElementC>::value,
"Supported of wmma output data type for f16 multiplicands are: f16 and f32");
// Wmma Fragment
using FragmentA = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementA>::Type,
typename CutlassToWmmaLayout<LayoutA>::Layout>;
using FragmentB = nvcuda::wmma::fragment<
nvcuda::wmma::matrix_b,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementB>::Type,
typename CutlassToWmmaLayout<LayoutB>::Layout>;
using FragmentC = nvcuda::wmma::fragment<
nvcuda::wmma::accumulator,
Shape::kM,
Shape::kN,
Shape::kK,
typename CutlassToWmmaDataType<ElementC>::Type>;
/// Performs a nvcuda::wmma matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
nvcuda::wmma::mma_sync(D, A, B, C);
}
#else
  static_assert(false, "wmma.mma.sync for floating point multiplicands is available only for SM70 and beyond");
#endif
};
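// Illustrative usage (editor's sketch; the shape, layouts, and accumulator type are
// hypothetical choices): on an SM70+ target with CUTLASS_ARCH_WMMA_SM70_ENABLED,
//
//   using WmmaOp = cutlass::arch::Wmma<
//       cutlass::gemm::GemmShape<16, 16, 16>,
//       cutlass::half_t, cutlass::layout::RowMajor,
//       cutlass::half_t, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::RowMajor,
//       cutlass::arch::OpMultiplyAdd>;
//
// device code loads WmmaOp::FragmentA/B/C with nvcuda::wmma::load_matrix_sync and
// then calls WmmaOp{}(frag_d, frag_a, frag_b, frag_c).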
} // namespace arch
} // namespace cutlass
| include/cutlass/arch/wmma_sm70.h/0 | {
"file_path": "include/cutlass/arch/wmma_sm70.h",
"repo_id": "include",
"token_count": 1912
} | 18 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped
matrix multiply-add with the appropriate threadblock-scoped epilogue.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_fixed_channels.h"
#include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_few_channels.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_fixed_channels.h"
#include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_few_channels.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace conv {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename OperatorClass,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kOptimized,
conv::StrideSupport StrideSupport = StrideSupport::kUnity,
/// Access granularity of A matrix in units of elements
int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value,
/// Access granularity of B matrix in units of elements
int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value
> struct DefaultConv2dFprop;
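// Illustrative usage (editor's sketch; every parameter choice below is hypothetical):
// a typical instantiation fixes element types, tile shapes, epilogue, and schedule,
// then extracts the kernel type for the device-level ImplicitGemmConvolution adaptor:
//
//   using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementA, LayoutA
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementB, LayoutB
//       float,           cutlass::layout::TensorNHWC,     // ElementC, LayoutC
//       float,                                            // ElementAccumulator
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<128, 128, 32>,           // ThreadblockShape
//       cutlass::gemm::GemmShape<64, 64, 32>,             // WarpShape
//       cutlass::gemm::GemmShape<16, 8, 16>,              // InstructionShape
//       cutlass::epilogue::thread::LinearCombination<float, 4, float, float>,
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,                                                // Stages
//       cutlass::arch::OpMultiplyAdd,
//       cutlass::conv::IteratorAlgorithm::kOptimized
//   >::Kernel;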
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassTensorOp convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for FixedChannels IteratorAlgorithm and
/// multistage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kFixedChannels,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFixedChannels<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFixedChannels<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for FixedChannels IteratorAlgorithm and
/// two-stage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kFixedChannels,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFixedChannels<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFixedChannels<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for FewChannels IteratorAlgorithm and
/// multistage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kFewChannels,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFewChannels<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFewChannels<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/// Defines a kernel for Conv2dFprop specialization for FewChannels IteratorAlgorithm and
/// two-stage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kFewChannels,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorFewChannels<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorFewChannels<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
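/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the library): one way the FewChannels, 2 stage specialization
// above might be instantiated. All concrete choices below (element types, layouts, tile shapes,
// epilogue, swizzle, arch tag, stride support, and alignments) are assumptions made for
// illustration only; they are not mandated by this header.
//
//   using FewChannelsFpropKernel = cutlass::conv::kernel::DefaultConv2dFprop<
//     cutlass::half_t, cutlass::layout::TensorNHWC,                    // ElementA, LayoutA
//     cutlass::half_t, cutlass::layout::TensorNHWC,                    // ElementB, LayoutB
//     cutlass::half_t, cutlass::layout::TensorNHWC,                    // ElementC, LayoutC
//     float,                                                           // ElementAccumulator
//     cutlass::arch::OpClassTensorOp,
//     cutlass::arch::Sm75,
//     cutlass::gemm::GemmShape<128, 128, 32>,                          // ThreadblockShape
//     cutlass::gemm::GemmShape<64, 64, 32>,                            // WarpShape
//     cutlass::gemm::GemmShape<16, 8, 8>,                              // InstructionShape
//     cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 4, float, float>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     2,                                                               // Stages
//     cutlass::arch::OpMultiplyAdd,
//     cutlass::conv::IteratorAlgorithm::kFewChannels,
//     cutlass::conv::StrideSupport::kStrided,
//     4, 4                                                             // AlignmentA, AlignmentB
//   >::Kernel;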
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and multistage
/// pipeline with interleaved layout.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB,
int InterleavedK
>
struct DefaultConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, MathOperatorTag, true>;
// Define iterators over tiles from the A operand
  // Note: the GEMM shared memory threadmap is reused here because the fprop conv
  // global memory layout maps onto the crosswise layout expected by the
  // interleaved GEMM shared memory threadmap, whereas the interleaved GEMM
  // global memory layout resembles the congruous layout.
using ThreadMapA = typename MmaCore::SmemThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, layout::TensorNCxHWx<InterleavedK>,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
  // Note: the GEMM shared memory threadmap is reused here because the fprop conv
  // global memory layout maps onto the crosswise layout expected by the
  // interleaved GEMM shared memory threadmap, whereas the interleaved GEMM
  // global memory layout resembles the congruous layout.
using ThreadMapB = typename MmaCore::SmemThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Global,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
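/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the library): a possible int8 instantiation of the interleaved
// multistage specialization above. Every concrete parameter (element types, interleave factor,
// tile shapes, epilogue, swizzle, arch tag, stage count, and alignments) is an assumption for
// illustration only.
//
//   using InterleavedFpropKernel = cutlass::conv::kernel::DefaultConv2dFprop<
//     int8_t, cutlass::layout::TensorNCxHWx<32>,                       // ElementA, LayoutA
//     int8_t, cutlass::layout::TensorCxRSKx<32>,                       // ElementB, LayoutB
//     int8_t, cutlass::layout::TensorNCxHWx<32>,                       // ElementC, LayoutC
//     int32_t,                                                         // ElementAccumulator
//     cutlass::arch::OpClassTensorOp,
//     cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<128, 128, 64>,
//     cutlass::gemm::GemmShape<64, 64, 64>,
//     cutlass::gemm::GemmShape<16, 8, 32>,
//     cutlass::epilogue::thread::LinearCombinationClamp<int8_t, 8, int32_t, float>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     3,                                                               // Stages
//     cutlass::arch::OpMultiplyAddSaturate,
//     cutlass::conv::IteratorAlgorithm::kAnalytic,
//     cutlass::conv::StrideSupport::kStrided,
//     16, 16                                                           // AlignmentA, AlignmentB
//   >::Kernel;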
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm
/// and 2 stage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA,
AccessTypeA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB,
AccessTypeB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogue<
ArchTag,
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp
>::Epilogue;
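  // (Unlike the multistage specializations above, this 2 stage path defers epilogue selection to
  //  detail::DefaultConvEpilogue, which dispatches on ArchTag so that pre-SM80 targets can pick
  //  an architecture-appropriate tensor op epilogue.)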
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm and 2 stage
/// pipeline with interleaved layout.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB,
int InterleavedK
>
struct DefaultConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
2, MathOperatorTag, true>;
// Define iterators over tiles from the A operand
  // Note: the GEMM shared memory threadmap is reused here because the fprop conv
  // global memory layout maps onto the crosswise layout expected by the
  // interleaved GEMM shared memory threadmap, whereas the interleaved GEMM
  // global memory layout resembles the congruous layout.
using ThreadMapA = typename MmaCore::SmemThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, layout::TensorNCxHWx<InterleavedK>,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
  // Note: the GEMM shared memory threadmap is reused here because the fprop conv
  // global memory layout maps onto the crosswise layout expected by the
  // interleaved GEMM shared memory threadmap, whereas the interleaved GEMM
  // global memory layout resembles the congruous layout.
using ThreadMapB = typename MmaCore::SmemThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and
/// multistage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
Stages, MathOperatorTag
>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA,
AccessTypeA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB,
AccessTypeB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
static cutlass::arch::CacheOperation::Kind const CacheOpB =
((sizeof_bits<ElementB>::value * AlignmentB) == 128)
? cutlass::arch::CacheOperation::Global
: cutlass::arch::CacheOperation::Always;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
CacheOpB,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
4
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
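/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the library): a possible FP16 NHWC instantiation of the
// Optimized multistage specialization above, which is the path typically reached on SM80-class
// targets. The tile shapes, epilogue, swizzle, stage count, and alignments below are assumptions
// for illustration only. The resulting ::Kernel type would normally be wrapped in a device-level
// convolution such as cutlass::conv::device::ImplicitGemmConvolution.
//
//   using OptimizedFpropKernel = cutlass::conv::kernel::DefaultConv2dFprop<
//     cutlass::half_t, cutlass::layout::TensorNHWC,
//     cutlass::half_t, cutlass::layout::TensorNHWC,
//     cutlass::half_t, cutlass::layout::TensorNHWC,
//     float,
//     cutlass::arch::OpClassTensorOp,
//     cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<128, 128, 64>,
//     cutlass::gemm::GemmShape<64, 64, 64>,
//     cutlass::gemm::GemmShape<16, 8, 16>,
//     cutlass::epilogue::thread::LinearCombination<cutlass::half_t, 8, float, float>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     3,                                                               // Stages
//     cutlass::arch::OpMultiplyAdd,
//     cutlass::conv::IteratorAlgorithm::kOptimized,
//     cutlass::conv::StrideSupport::kStrided,
//     8, 8                                                             // AlignmentA, AlignmentB
//   >::Kernel;
//
//   // e.g. cutlass::conv::device::ImplicitGemmConvolution<OptimizedFpropKernel>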
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and
/// multistage pipeline with interleaved layout.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB,
int InterleavedK
>
struct DefaultConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>, ElementAccumulator, LayoutC, arch::OpClassTensorOp,
Stages, MathOperatorTag, true
>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::SmemThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::SmemThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Global,
MmaPolicy,
Stages
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm
/// and 2 stage pipeline.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using AccessTypeA = cutlass::AlignedArray<ElementA, AlignmentA>;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA,
AccessTypeA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using AccessTypeB = cutlass::AlignedArray<ElementB, AlignmentB>;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB,
AccessTypeB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename detail::DefaultConvEpilogue<
ArchTag,
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
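/////////////////////////////////////////////////////////////////////////////////////////////////
// Note: throughout this file, the Stages == 2 specializations lower onto
// threadblock::ImplicitGemmPipelined (a double-buffered shared memory pipeline that does not
// require cp.async), while the variable-Stages specializations lower onto
// threadblock::ImplicitGemmMultistage, whose asynchronous copies target SM80 and newer.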
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm and 2 stage
/// pipeline with interleaved layout.
template <
typename ElementA,
typename ElementB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB,
int InterleavedK
>
struct DefaultConv2dFprop <
ElementA,
layout::TensorNCxHWx<InterleavedK>,
ElementB,
layout::TensorCxRSKx<InterleavedK>,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassTensorOp,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::ColumnMajorInterleaved<InterleavedK>,
ElementB, layout::RowMajorInterleaved<InterleavedK>,
ElementAccumulator, LayoutC, arch::OpClassTensorOp,
2, MathOperatorTag, true>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::SmemThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, layout::TensorNCxHWx<InterleavedK>,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::SmemThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, layout::TensorCxRSKx<InterleavedK>,
ThreadMapB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaTensorOp = typename MmaCore::MmaTensorOp;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultInterleavedConvEpilogue<
ThreadblockShape,
WarpMmaTensorOp,
kPartitionsK,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
InterleavedK
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// OpClassSimt convolutions
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
4
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
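/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the library): a possible FP32 instantiation of the SIMT
// (FFMA) multistage specialization above. The tile shapes, swizzle, stage count, and unit
// alignments below are assumptions for illustration only; SIMT kernels commonly use an
// InstructionShape of 1x1x1.
//
//   using SimtFpropKernel = cutlass::conv::kernel::DefaultConv2dFprop<
//     float, cutlass::layout::TensorNHWC,
//     float, cutlass::layout::TensorNHWC,
//     float, cutlass::layout::TensorNHWC,
//     float,
//     cutlass::arch::OpClassSimt,
//     cutlass::arch::Sm80,
//     cutlass::gemm::GemmShape<128, 128, 8>,
//     cutlass::gemm::GemmShape<32, 64, 8>,
//     cutlass::gemm::GemmShape<1, 1, 1>,
//     cutlass::epilogue::thread::LinearCombination<float, 1, float, float>,
//     cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//     3,                                                               // Stages
//     cutlass::arch::OpMultiplyAdd,
//     cutlass::conv::IteratorAlgorithm::kAnalytic,
//     cutlass::conv::StrideSupport::kStrided,
//     1, 1                                                             // AlignmentA, AlignmentB
//   >::Kernel;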
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm,
/// multi-stage pipeline, and FFMA-based mainloop for SM80
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
int Stages,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
Stages,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
Stages, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmMultistage<
ThreadblockShape,
IteratorA,
SmemIteratorA,
arch::CacheOperation::Always,
IteratorB,
SmemIteratorB,
arch::CacheOperation::Always,
MmaPolicy,
Stages
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
4
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Analytic IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kAnalytic,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA, LayoutA,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorAnalytic<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB, LayoutB,
ThreadMapB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
4
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a kernel for Conv2dFprop specialization for Optimized IteratorAlgorithm,
/// 2 stage pipeline, and FFMA-based mainloop for SM50
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementAccumulator,
typename ArchTag,
typename ThreadblockShape,
typename WarpShape,
typename InstructionShape,
typename EpilogueOutputOp,
typename ThreadblockSwizzle,
typename MathOperatorTag,
conv::StrideSupport StrideSupport,
int AlignmentA,
int AlignmentB
>
struct DefaultConv2dFprop <
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementAccumulator,
arch::OpClassSimt,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
2,
MathOperatorTag,
IteratorAlgorithm::kOptimized,
StrideSupport,
AlignmentA,
AlignmentB
> {
// Define the core components from GEMM
using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
ThreadblockShape, WarpShape, InstructionShape, ElementA, layout::RowMajor,
ElementB, layout::ColumnMajor, ElementAccumulator, layout::RowMajor, arch::OpClassSimt,
2, MathOperatorTag>;
// Define iterators over tiles from the A operand
using ThreadMapA = typename MmaCore::IteratorThreadMapA;
using IteratorA =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropActivationTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK>,
ElementA,
LayoutA,
ThreadMapA
>
>;
using SmemIteratorA = typename MmaCore::SmemIteratorA;
// Define iterators over tiles from the B operand
using ThreadMapB = typename MmaCore::IteratorThreadMapB;
using IteratorB =
cutlass::conv::threadblock::TileIterator<
cutlass::conv::threadblock::Conv2dFpropFilterTileAccessIteratorOptimized<
cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
ElementB,
LayoutB,
ThreadMapB
>
>;
using SmemIteratorB = typename MmaCore::SmemIteratorB;
// Warp-level GEMM components
using WarpMmaSimtOp = typename MmaCore::MmaWarpSimt;
using MmaPolicy = typename MmaCore::MmaPolicy;
// Define the Mma
using Mma = threadblock::ImplicitGemmPipelined<
ThreadblockShape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
MmaPolicy
>;
// Define the epilogue
using Epilogue = typename epilogue::threadblock::DefaultEpilogueSimt<
ThreadblockShape,
WarpMmaSimtOp,
EpilogueOutputOp,
EpilogueOutputOp::kCount,
false,
layout::NoPermute,
StrideSupport,
4
>::Epilogue;
// Define the kernel
using Kernel = cutlass::conv::kernel::ImplicitGemmConvolution<
Mma,
Epilogue,
ThreadblockSwizzle,
conv::Operator::kFprop
>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace conv
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| include/cutlass/conv/kernel/default_conv2d_fprop.h/0 | {
"file_path": "include/cutlass/conv/kernel/default_conv2d_fprop.h",
"repo_id": "include",
"token_count": 19809
} | 19 |