Dataset columns: text (string, length 27 to 947k), id (string, length 10 to 118), metadata (dict), __index_level_0__ (int64, 0 to 80).
{ "path": "./../../../../examples/python/02_pytorch_extension_grouped_gemm.ipynb" }
python/docs_src/source/externals/02_pytorch_extension_grouped_gemm.nblink/0
{ "file_path": "python/docs_src/source/externals/02_pytorch_extension_grouped_gemm.nblink", "repo_id": "python", "token_count": 39 }
53
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Utility functions for Conv2d tests. """ from cutlass_library import SubstituteTemplate import torch import cutlass from cutlass_library import ( ConvKind, ConvMode, DataType, DataTypeNames, EpilogueScheduleSuffixes, KernelScheduleSuffixes, LayoutType, OpcodeClassNames, ShortDataTypeNames, ShortLayoutTypeNames, SplitKMode, ) from cutlass.shape import Conv2DProblemSize from cutlass.utils.datatypes import numpy_type, torch_type from conv2d_problem_sizes import TestbedConv2dProblemSizes def get_name_conv2d( arch, conv_kind, element, element_accumulator, element_output, opclass, threadblock_shape, warp_count, instruction_shape, stages, iterator_algorithm, swizzle, split_k_mode, split_k_slices, activation ): """ Generates a procedural name for a test case for conv2d :param arch: compute capability of kernel being generated :type arch: int :param conv_kind: the convolution type (i.e. 
fprop, dgrad, wgrad) :type conv_kind: str :param iterator_algorithm: the iterator algorithm applied :type iterator_algorithm: cutlass_library.library.IteratorAlgorithm :param element_a: data type of operand A :param element_b: data type of operand B :param element_c: data type of operand C :param element_accumulator: data type used in accumulation :param opclass: class of operation being performed (e.g., SIMT, Tensor Core) :type opclass: cutlass.OpcodeClass :param threadblock_shape: indexable container of dimensions of threadblock tiles :param stages: number of pipeline stages to use in the kernel :type stages: int :param stride_support: stride support of dgrad :param alignment: int :type alignment: int :return: str """ if iterator_algorithm is None: iterator_algorithm = "AUTO" if swizzle is None: swizzle = 1 name_format = "test_SM${arch}_Device_Conv2d_${conv_kind}_${iter_alg}_ImplicitGemm_${eA}nhwc_${eB}nhwc_${eC}nhwc_${opclass}_${acc}_${tbM}x${tbN}x${tbK}_${wM}x${wN}x${wK}_${IM}${IN}${IK}_stage${stages}_swizzle${swizzle}_${split_k_mode}${split_k_slices}_${activation}" return SubstituteTemplate( name_format, { "arch": str(arch), "conv_kind": conv_kind, "iter_alg": iterator_algorithm, "eA": DataTypeNames[element], "eB": DataTypeNames[element], "eC": DataTypeNames[element_output], "opclass": opclass, "acc": DataTypeNames[element_accumulator], "tbM": str(threadblock_shape[0]), "tbN": str(threadblock_shape[1]), "tbK": str(threadblock_shape[2]), "wM": str(threadblock_shape[0] // warp_count[0]), "wN": str(threadblock_shape[1] // warp_count[1]), "wK": str(threadblock_shape[2] // warp_count[2]), "IM": str(instruction_shape[0]), "IN": str(instruction_shape[1]), "IK": str(instruction_shape[2]), "stages": str(stages), "swizzle": str(swizzle), "split_k_mode": split_k_mode, "split_k_slices": str(split_k_slices), "activation": activation } ) def conv2d_few_channel_problemsizes(channels): problem_sizes = [ Conv2DProblemSize( 1, 8, 8, channels, 16, 3, 3, channels, 1, 1, 2, 2, 1, 1, ConvMode.CrossCorrelation, 1, 1 ), Conv2DProblemSize( 1, 16, 16, channels, 16, 3, 3, channels, 1, 1, 2, 2, 1, 1, ConvMode.CrossCorrelation, 1, 1 ), Conv2DProblemSize( 1, 16, 16, channels, 16, 7, 7, channels, 1, 1, 1, 1, 1, 1, ConvMode.CrossCorrelation, 1, 1 ), Conv2DProblemSize( 1, 224, 224, channels, 32, 7, 7, channels, 1, 1, 1, 1, 1, 1, ConvMode.CrossCorrelation, 1, 1 ), Conv2DProblemSize( 1, 224, 224, channels, 64, 7, 7, channels, 1, 1, 2, 2, 1, 1, ConvMode.CrossCorrelation, 1, 1 ), Conv2DProblemSize( 1, 224, 224, channels, 64, 5, 5, channels, 1, 1, 1, 1, 1, 1, ConvMode.CrossCorrelation, 1, 1 ), Conv2DProblemSize( 1, 224, 224, channels, 64, 5, 5, channels, 1, 1, 2, 2, 1, 1, ConvMode.CrossCorrelation, 1, 1 ), ] return problem_sizes def validate_problem_size(ps, conv_kind, split_k_slices): P = (ps.H + 2 * ps.pad_h - ps.dilation_h * (ps.R - 1) - 1) // ps.stride_h + 1 Q = (ps.W + 2 * ps.pad_w - ps.dilation_w * (ps.S - 1) - 1) // ps.stride_w + 1 if P != ps.P or Q != ps.Q: return False # Split-K (serial or parallel) is not supported for strided dgrad if conv_kind == "dgrad" and split_k_slices > 1 and (ps.stride_h > 1 or ps.stride_w > 1): return False return True class Conv2dLauncherFrontend: def __init__(self, plan: cutlass.Conv2d, seed: int = 80, backend="numpy"): self.operation = plan self.conv_kind = plan.conv_kind self.seed = seed self.backend = backend self.dtype_A = plan._element_a self.dtype_B = plan._element_b self.dtype_C = plan._element_c self.dtype_acc = plan._element_accumulator self.layout_A = LayoutType.TensorNHWC 
self.layout_B = LayoutType.TensorNHWC self.layout_C = LayoutType.TensorNHWC self.layout_D = LayoutType.TensorNHWC self.element_compute = DataType.f32 if self.dtype_A in [cutlass.DataType.f16, cutlass.DataType.bf16]: self.rand_max = 1 else: self.rand_max = 4 self.activation = plan.activation def uniform_init(self, size, dtype): tensor = torch.ceil( torch.empty(size=size, dtype=torch_type(dtype), device="cuda").uniform_(-self.rand_max - 0.5, self.rand_max - 0.5) ).to(memory_format=torch.channels_last) return tensor def reference(self, ps, A, B, C, alpha, beta, activation): if self.conv_kind == ConvKind.Fprop: torch_result = alpha * torch.ops.aten.conv2d( A, B, stride=(ps.stride_h, ps.stride_w), padding=(ps.pad_h, ps.pad_w), dilation=(ps.dilation_h, ps.dilation_w) ) + beta * C elif self.conv_kind == ConvKind.Dgrad: torch_result = alpha * torch.nn.grad.conv2d_input( (ps.N, ps.C, ps.H, ps.W), B, A, padding=(ps.pad_h, ps.pad_w), stride=(ps.stride_h, ps.stride_w) ) + beta * C elif self.conv_kind == ConvKind.Wgrad: torch_result = alpha * torch.nn.grad.conv2d_weight( B, (ps.K, ps.C, ps.R, ps.S), A, padding=(ps.pad_h, ps.pad_w), stride=(ps.stride_h, ps.stride_w) ) + beta * C else: raise Exception(f"Conv kind {self.conv_kind} is currently unsupported.") if activation == cutlass.backend.epilogue.relu: torch_result = torch.nn.functional.relu(torch_result) elif activation == cutlass.backend.epilogue.leaky_relu: torch_result = torch.nn.functional.leaky_relu(torch_result, 0.5) return torch_result def run(self, ps, split_k_mode=SplitKMode.Serial, split_k_slices=1, alpha=1.0, beta=0.0): if self.conv_kind == ConvKind.Fprop: tensor_A_size = (ps.N, ps.C, ps.H, ps.W) tensor_B_size = (ps.K, ps.C, ps.R, ps.S) tensor_C_size = (ps.N, ps.K, ps.P, ps.Q) elif self.conv_kind == ConvKind.Dgrad: tensor_A_size = (ps.N, ps.K, ps.P, ps.Q) tensor_B_size = (ps.K, ps.C, ps.R, ps.S) tensor_C_size = (ps.N, ps.C, ps.H, ps.W) elif self.conv_kind == ConvKind.Wgrad: tensor_A_size = (ps.N, ps.K, ps.P, ps.Q) tensor_B_size = (ps.N, ps.C, ps.H, ps.W) tensor_C_size = (ps.K, ps.C, ps.R, ps.S) else: raise Exception(f"Conv kind {self.conv_kind} is not supported") torch.manual_seed(self.seed) tensor_A = self.uniform_init(size=tensor_A_size, dtype=self.dtype_A) tensor_B = self.uniform_init(size=tensor_B_size, dtype=self.dtype_B) tensor_C = self.uniform_init(size=tensor_C_size, dtype=self.dtype_C) tensor_D = torch.zeros_like(tensor_C).to(memory_format=torch.channels_last) args = self.operation.run(tensor_A, tensor_B, tensor_C, tensor_D, stride=(ps.stride_h, ps.stride_w), padding=(ps.pad_h, ps.pad_w), dilation=(ps.dilation_h, ps.dilation_w), alpha=alpha, beta=beta, split_k=(split_k_mode, split_k_slices)) args.sync() tensor_D_ref = self.reference(ps, tensor_A, tensor_B, tensor_C, alpha, beta, self.activation) torch.cuda.synchronize() passed = torch.allclose(tensor_D, tensor_D_ref, atol=2e-06) return passed def add_test( cls, cc, conv_kind, problem_sizes, element, element_accumulator, element_output, opclass, threadblock_shape, warp_count, instruction_shape, stages, iterator_algorithm=None, swizzle=None, split_k_mode="serial", split_k_slices=1, activation = "identity" ): """Create a test-running function with the given specification""" test_name = get_name_conv2d( cc, conv_kind, element, element_accumulator, element_output, opclass, threadblock_shape, warp_count, instruction_shape, stages, iterator_algorithm, swizzle, split_k_mode, split_k_slices, activation) def run(self): # Create the plan plan = cutlass.Conv2d( kind=conv_kind, element=element, 
element_accumulator=element_accumulator, element_C=element_output, element_D=element_output ) # Set the opclass plan.opclass = opclass # Set the tile description td = { "threadblock_shape": threadblock_shape, "warp_count": warp_count, "stages": stages, "instruction_shape": instruction_shape, } plan.tile_description = td # Set iterator algorithm if iterator_algorithm is not None: plan.iterator_algorithm = iterator_algorithm # Set swizzling functor if swizzle is not None: plan.swizzling_stride = swizzle if activation != "identity": if activation == "leaky_relu": plan.activation = (cutlass.epilogue.leaky_relu, 0.5) else: plan.activation = getattr(cutlass.epilogue, activation) conv2d_launcher = Conv2dLauncherFrontend(plan, 80, backend="torch") for ps in problem_sizes: if not validate_problem_size(ps, conv_kind, split_k_slices): continue self.assertTrue(conv2d_launcher.run(ps, split_k_mode, split_k_slices, 1.0, 2.0)) setattr(cls, test_name, run) return run def get_conv_problems(): # 64: minimum channel size conv_problems = TestbedConv2dProblemSizes(64).all # Insert alignment 4 & 2 tests conv_problems += [ Conv2DProblemSize( 1, 4, 4, 12, 8, 3, 3, 12, 0, 0, 3, 3, 1, 1, ConvMode.CrossCorrelation, 1, 1 ), Conv2DProblemSize( 1, 4, 4, 14, 8, 3, 3, 14, 0, 0, 3, 3, 1, 1, ConvMode.CrossCorrelation, 1, 1 ), Conv2DProblemSize( 1, 23, 56, 98, 128, 3, 3, 98, 4, 5, 3, 3, 1, 1, ConvMode.CrossCorrelation, 1, 1 ), ] return conv_problems
test/python/cutlass/conv2d/conv2d_test_utils.py/0
{ "file_path": "test/python/cutlass/conv2d/conv2d_test_utils.py", "repo_id": "test", "token_count": 6885 }
54
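The helpers in conv2d_test_utils.py are easiest to read from the call site: add_test builds a cutlass.Conv2d plan from the given tile description, wraps it in Conv2dLauncherFrontend, and attaches the resulting runner to a test class under the procedurally generated name. The sketch below shows one plausible registration; the shapes, data types, and the string forms of conv_kind and opclass are illustrative assumptions rather than values taken from the repository's own test files, and running it requires the cutlass Python package plus a CUDA-capable GPU.

```python
# Hypothetical usage sketch (not taken from the repository): registers one
# generated SM80 fprop test case on a unittest.TestCase class using the
# helpers defined above.
import unittest

import cutlass

from conv2d_test_utils import add_test, get_conv_problems


class Conv2dFpropSm80(unittest.TestCase):
    """Empty container; add_test() attaches a generated test method to it."""
    pass


add_test(
    cls=Conv2dFpropSm80,
    cc=80,                               # target compute capability (assumed)
    conv_kind="fprop",
    problem_sizes=get_conv_problems(),
    element=cutlass.DataType.f16,
    element_accumulator=cutlass.DataType.f32,
    element_output=cutlass.DataType.f16,
    opclass="tensor_op",                 # assumed string form; an OpcodeClass enum may also work
    threadblock_shape=[128, 128, 64],
    warp_count=[2, 2, 1],                # yields a 64x64x64 warp tile in the generated name
    instruction_shape=[16, 8, 16],
    stages=3,
)

if __name__ == "__main__":
    unittest.main()
```

Problem sizes that fail validate_problem_size (for example strided dgrad with split-K) are silently skipped inside the generated test, so one registration can safely cover the whole get_conv_problems() list.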
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Unit tests for conversion operators. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/numeric_conversion.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace core { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Simple conversion function template <typename Destination, typename Source, int Count> __global__ void convert( cutlass::Array<Destination, Count> *destination, cutlass::Array<Source, Count> const *source) { cutlass::NumericArrayConverter<Destination, Source, Count> convert; *destination = convert(*source); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Destination, typename Source, int Count> void run_test(const char dest_name[], const char source_name[], const int range = 4, const int offset = 0) { const int kN = Count; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<Destination, cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<Source, cutlass::layout::RowMajor> source({1, kN}); auto source_ref = source.host_ref(); auto destination_ref = destination.host_ref(); for (int i = 0; i < kN; ++i) { source_ref.at({0, i}) = Source(i % range + offset); } source.sync_device(); convert<Destination, Source, kN><<< grid, block >>>( reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()), reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data()) ); destination.sync_host(); for (int i = 0; i < kN; ++i) { EXPECT_TRUE(float(destination_ref.at({0, i})) == float(source_ref.at({0, i}))) << "Destination type: " << dest_name << " "<< float(destination_ref.at({0, i})) << ", Source type: " << source_name << " " << float(source_ref.at({0, i})) << ", Count: " << Count; } } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Destination, typename Source, typename ScaleFactor, int Count> __global__ void convert_with_scale_factor( cutlass::Array<Destination, Count> *destination, cutlass::Array<Source, Count> const *source, cutlass::Array<ScaleFactor, Count> const *scale_factor) { cutlass::NumericArrayConverter<Destination, Source, Count> convert; *destination = convert(*source, *scale_factor); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Destination, typename Source, typename ScaleFactor, int Count> void run_test_with_scalefactor(const char dest_name[], const char source_name[], const char scale_factor_name[], const int range = 4, const int offset = 0) { const int kN = Count; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<Destination, cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<Source, cutlass::layout::RowMajor> source({1, kN}); cutlass::HostTensor<ScaleFactor, cutlass::layout::RowMajor> scale_factor({1, kN}); auto source_ref = source.host_ref(); auto destination_ref = destination.host_ref(); auto scale_factor_ref = scale_factor.host_ref(); for (int i = 0; i < kN; ++i) { source_ref.at({0, i}) = Source(i % range + offset); } for (int i = 0; i < kN; ++i) { scale_factor_ref.at({0, i}) = ScaleFactor(1 + i % 8); } source.sync_device(); scale_factor.sync_device(); convert_with_scale_factor<Destination, Source, ScaleFactor, kN><<< grid, block >>>( reinterpret_cast<cutlass::Array<Destination, kN> 
*>(destination.device_data()), reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data()), reinterpret_cast<cutlass::Array<ScaleFactor, kN> const *>(scale_factor.device_data()) ); destination.sync_host(); for (int i = 0; i < kN; ++i) { float ref = float(source_ref.at({0, i})) / float(scale_factor_ref.at({0, i})); bool pass = float(destination_ref.at({0, i})) == ref; EXPECT_TRUE(pass) << "Destination type: " << dest_name << " "<< float(destination_ref.at({0, i})) << std::endl << ", Source type: " << source_name << " " << float(source_ref.at({0, i})) << std::endl << ", Scalefactor type: " << source_name << " " << float(scale_factor_ref.at({0, i})) << std::endl << ", idx: " << i << std::endl; } } } // namespace kernel } // namespace core } // namespace test ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f32_to_f16_rn) { constexpr int kN = 1; using Source = float; const char source_name[] = "float"; using Destination = cutlass::half_t; const char dest_name[] = "half_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f32x2_to_f16x2_rn) { constexpr int kN = 2; using Source = float; const char source_name[] = "float"; using Destination = cutlass::half_t; const char dest_name[] = "half_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f32x8_to_f16x8_rn) { constexpr int kN = 8; using Source = float; const char source_name[] = "float"; using Destination = cutlass::half_t; const char dest_name[] = "half_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f16_to_f32_rn) { int const kN = 1; using Source = cutlass::half_t; const char source_name[] = "half_t"; using Destination = float; const char dest_name[] = "float"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f16x8_to_f32x8_rn) { int const kN = 8; using Source = cutlass::half_t; const char source_name[] = "half_t"; using Destination = float; const char dest_name[] = "float"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f32_to_fe4m3_rn) { int const kN = 1; using Source = float; const char source_name[] = "float"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f32_to_fe4m3_rn_array) { int const kN = 27; using Source = float; const char source_name[] = "float"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f32_to_fe5m2_rn) { int const kN = 1; using Source = float; const char source_name[] = "float"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f32_to_fe5m2_rn_array) { int const kN = 27; using Source = float; const char source_name[] = "float"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; 
test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f16_to_fe4m3_rn) { int const kN = 1; using Source = cutlass::half_t; const char source_name[] = "half_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f16_to_fe4m3_rn_array) { int const kN = 27; using Source = cutlass::half_t; const char source_name[] = "half_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f16_to_fe5m2_rn) { int const kN = 1; using Source = cutlass::half_t; const char source_name[] = "half_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f16_to_fe5m2_rn_array) { int const kN = 27; using Source = cutlass::half_t; const char source_name[] = "half_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, bf16_to_fe4m3_rn) { int const kN = 1; using Source = cutlass::bfloat16_t; const char source_name[] = "bfloat16_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, bf16_to_fe4m3_rn_array) { int const kN = 27; using Source = cutlass::bfloat16_t; const char source_name[] = "bfloat16_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, bf16_to_fe5m2_rn) { int const kN = 1; using Source = cutlass::bfloat16_t; const char source_name[] = "bfloat16_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, bf16_to_fe5m2_rn_array) { int const kN = 27; using Source = cutlass::bfloat16_t; const char source_name[] = "bfloat16_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, fe4m3_to_fe5m2_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_fe5m2_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_to_fe4m3_rn) { int const kN = 1; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } 
TEST(NumericConversion, fe5m2_to_fe4m3_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_f32_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = float; const char dest_name[] = "float"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f32x8_to_s8x8_rn) { int const kN = 8; using Source = float; const char source_name[] = "float"; using Destination = int8_t; const char dest_name[] = "int8_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_f32_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = float; const char dest_name[] = "float"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_to_f32_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = float; const char dest_name[] = "float"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_f16_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = cutlass::half_t; const char dest_name[] = "half_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_f16_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = cutlass::half_t; const char dest_name[] = "half_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_to_f16_rn) { int const kN = 1; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = cutlass::half_t; const char dest_name[] = "half_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_to_f16_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = cutlass::half_t; const char dest_name[] = "half_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_bf16_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = cutlass::bfloat16_t; const char dest_name[] = "bfloat16_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_bf16_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = cutlass::bfloat16_t; const char dest_name[] = "bfloat16_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_to_bf16_rn) { int const kN = 1; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = cutlass::bfloat16_t; const char dest_name[] = 
"bfloat16_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_to_bf16_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = cutlass::bfloat16_t; const char dest_name[] = "bfloat16_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } // These are included as regression tests for a special case when N = 4. TEST(NumericConversion, int4b_t_to_fe5m2_t_array_4) { int const kN = 4; using Source = cutlass::int4b_t; const char source_name[] = "int4b_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, int_to_fe4m3_t_array_4) { int const kN = 4; using Source = int; const char source_name[] = "int"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, int2b_t_to_fe4m3_t_array_4) { int const kN = 4; using Source = cutlass::int2b_t; const char source_name[] = "int2b_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_t_to_double_array_4) { int const kN = 4; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = double; const char dest_name[] = "double"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, int_to_fe4m3_t_array_32) { int const kN = 32; using Source = int; const char source_name[] = "int"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct GetName { static constexpr char name[] = "UNSUPPORTED"; }; template <> struct GetName<cutlass::int4b_t> { static constexpr char name[] = "int4b_t"; }; template <> struct GetName<uint8_t> { static constexpr char name[] = "uint8_t"; }; template <> struct GetName<int8_t> { static constexpr char name[] = "int8_t"; }; template <> struct GetName<cutlass::float_e4m3_t> { static constexpr char name[] = "float_e4m3_t"; }; template <> struct GetName<cutlass::half_t> { static constexpr char name[] = "half_t"; }; template <> struct GetName<cutlass::bfloat16_t> { static constexpr char name[] = "bfloat16_t"; }; template <> struct GetName<float> { static constexpr char name[] = "float"; }; template <typename Result_, typename Source_> struct ResultSourcePair { using Result = Result_; using Source = Source_; }; template <typename ResultSourcePair> class VectorArrayConverterTest : public testing::Test { public: using Result = typename ResultSourcePair::Result; using Source = typename ResultSourcePair::Source; template <int N> static void emit_test() { const int range = 1 << cutlass::sizeof_bits<Source>::value; const int offset = cutlass::platform::numeric_limits<Source>::lowest(); test::core::kernel::run_test<Result, Source, N>(GetName<Result>::name, GetName<Source>::name, range, offset); } }; using VectorConvertTypes = ::testing::Types< ResultSourcePair<float, int8_t>, ResultSourcePair<float, uint8_t>, ResultSourcePair<cutlass::half_t, int8_t>, 
ResultSourcePair<cutlass::half_t, uint8_t>, ResultSourcePair<cutlass::bfloat16_t, uint8_t>, ResultSourcePair<cutlass::bfloat16_t, int8_t>, ResultSourcePair<cutlass::float_e4m3_t, cutlass::int4b_t>, ResultSourcePair<cutlass::half_t, cutlass::int4b_t>, ResultSourcePair<cutlass::bfloat16_t, cutlass::int4b_t>, ResultSourcePair<float, cutlass::int4b_t> >; TYPED_TEST_SUITE(VectorArrayConverterTest, VectorConvertTypes); TYPED_TEST(VectorArrayConverterTest, array_1) { TestFixture::template emit_test<1>(); } TYPED_TEST(VectorArrayConverterTest, array_2) { TestFixture::template emit_test<2>(); } TYPED_TEST(VectorArrayConverterTest, array_3) { TestFixture::template emit_test<3>(); } TYPED_TEST(VectorArrayConverterTest, array_4) { TestFixture::template emit_test<4>(); } TYPED_TEST(VectorArrayConverterTest, array_5) { TestFixture::template emit_test<5>(); } TYPED_TEST(VectorArrayConverterTest, array_8) { TestFixture::template emit_test<8>(); } TYPED_TEST(VectorArrayConverterTest, array_10) { // N > 8 and N is not a multiple of 4 TestFixture::template emit_test<10>(); } TYPED_TEST(VectorArrayConverterTest, array_12) { // N > 8 and N is a multiple of 4 TestFixture::template emit_test<12>(); } TYPED_TEST(VectorArrayConverterTest, array_16) { // N > 8 and N is a multiple of 8 TestFixture::template emit_test<16>(); } TYPED_TEST(VectorArrayConverterTest, array_17) { // N > 8 and N is not a multiple of 8 TestFixture::template emit_test<17>(); } TYPED_TEST(VectorArrayConverterTest, array_27) { // Test entire conversion range with residue (for int4) TestFixture::template emit_test<27>(); } TYPED_TEST(VectorArrayConverterTest, array_31) { // Force use of converters for 16, 8, 4, 2 and scalar // if max width is 16 TestFixture::template emit_test<31>(); } TYPED_TEST(VectorArrayConverterTest, array_63) { // Force use of converters for 32, 16, 8, 4, 2 and scalar // if max width is 32 TestFixture::template emit_test<63>(); } TYPED_TEST(VectorArrayConverterTest, array_256) { // Test entire conversion range (for int8) TestFixture::template emit_test<256>(); } TYPED_TEST(VectorArrayConverterTest, array_259) { // Force use of 4, 2 and scalar converter (if max width is 4) TestFixture::template emit_test<259>(); } TYPED_TEST(VectorArrayConverterTest, array_263) { // Force use of 8, 4, 2 and scalar converter (if max width is 8) TestFixture::template emit_test<263>(); } /////////////////////////////////////////////////////////////////////////////////////////////////
test/unit/core/numeric_conversion.cu/0
{ "file_path": "test/unit/core/numeric_conversion.cu", "repo_id": "test", "token_count": 8188 }
55
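Every test in numeric_conversion.cu follows the same pattern: fix a Source/Destination pair and an array width kN, then call test::core::kernel::run_test, which fills a host tensor, launches the convert kernel once, and compares each converted element on the host. A new pairing can be covered in the same style; the bfloat16-to-float case below is an illustrative addition, not a test present in the file, and assumes CUTLASS provides a NumericArrayConverter for that pair.

```cpp
// Illustrative test in the style of the file above (assumed, not present in it):
// converts a 27-element array of bfloat16_t values to float and checks each
// element on the host after the kernel completes.
TEST(NumericConversion, bf16_to_f32_array) {
  int const kN = 27;

  using Source = cutlass::bfloat16_t;
  const char source_name[] = "bfloat16_t";

  using Destination = float;
  const char dest_name[] = "float";

  test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name);
}
```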
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/gemm/warp/mma_tensor_op_sm70.h" #include "cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "testbed.h" ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_128x64_64x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, 
ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_64x128_32x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_64x64_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = 
cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_64x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); 
EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_128x64_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_128x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, 
ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_128x256_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 256, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_256x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<256, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, 
cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Mixed: F32 accumulation // ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_64x64_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_128x256_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 256, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; 
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_256x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<256, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_128x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 128, 4>; using WarpShape = 
cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = 
testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_64x128_32x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_128x64_64x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = 
typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// // // F32 accumulation, F32 output // ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_64x64_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_64x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 
128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_128x64_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_128x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using 
WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_128x256_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 256, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_256x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<256, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< 
cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_128x64_64x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = 
cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_64x128_32x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// // This works TEST(SM70_Epilogue_threadblock_epilogue, vec8_f16_f32_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using Shape 
= cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 8; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } // This works TEST(SM70_Epilogue_threadblock_epilogue, vec2_f16_f32_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 2; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); 
EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// // This fails TEST(SM70_Epilogue_threadblock_epilogue, vec1_f16_f32_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 1; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, vec1_f32_volta_tensor_op_128x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 1; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, 
kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, vec1_f16_f32_volta_tensor_op_128x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 1; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, vec1_f16_f32_volta_tensor_op_128x256_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 256, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 1; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, 
kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } /////////////////////////////////////////////////////////////////////////////////////////////////
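// Note on the vector widths used above: most tests use a full 128-bit store, so
// kElementsPerAccess works out to 128 / 16 = 8 for half_t output and 128 / 32 = 4 for float
// output, while the vec8/vec2/vec1 tests pin the width explicitly to probe narrower accesses.
//
// The test bodies differ only in threadblock shape, warp shape, output/accumulator types, and
// vector width. The fixture below is a minimal sketch (not part of the original file or of
// CUTLASS; the name VoltaEpilogueFixture is illustrative) showing how the same type composition
// could be written once and reused. It assumes the headers and the EpilogueTestbed harness
// already used by this file.

namespace {

template <
  typename Shape_,               // threadblock tile, e.g. cutlass::gemm::GemmShape<64, 64, 4>
  typename WarpShape_,           // warp tile, e.g. cutlass::gemm::GemmShape<32, 32, 4>
  typename ElementOutput_,       // cutlass::half_t or float
  typename ElementAccumulator_,  // cutlass::half_t or float
  int ElementsPerAccess          // e.g. 128 / cutlass::sizeof_bits<ElementOutput_>::value
>
struct VoltaEpilogueFixture {

  using ElementA = cutlass::half_t;
  using ElementB = cutlass::half_t;
  using ElementC = ElementAccumulator_;

  using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<
      cutlass::sizeof_bits<ElementA>::value>;
  using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<
      cutlass::sizeof_bits<ElementB>::value>;

  // Same warp-level Volta tensor-op policy used by every test above.
  using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
    cutlass::arch::Mma<
      cutlass::gemm::GemmShape<16, 16, 4>,
      32,
      ElementA, cutlass::layout::ColumnMajor,
      ElementB, cutlass::layout::RowMajor,
      ElementC, cutlass::layout::RowMajor,
      cutlass::arch::OpMultiplyAdd
    >,
    cutlass::MatrixShape<1, 1>
  >;

  using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
    WarpShape_, ElementA, LayoutA, ElementB, LayoutB, ElementC,
    cutlass::layout::RowMajor, Policy>;

  static int const kPartitionsK = 1;

  using OutputOp = cutlass::epilogue::thread::LinearCombination<
    ElementOutput_, ElementsPerAccess, ElementAccumulator_, ElementAccumulator_>;

  using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
    Shape_, WarpMmaTensorOp, kPartitionsK, OutputOp, ElementsPerAccess>::Epilogue;

  // Runs the shared testbed and reports whether all comparisons passed.
  static bool run() {
    EpilogueTestbed<Epilogue> testbed;
    return testbed.run_all();
  }
};

} // namespace

// For example, the f32_volta_tensor_op_64x64_64x64x4 case above would be expressed as:
//
//   using Fixture = VoltaEpilogueFixture<
//       cutlass::gemm::GemmShape<64, 64, 4>, cutlass::gemm::GemmShape<64, 64, 4>,
//       float, float, 128 / cutlass::sizeof_bits<float>::value>;
//   EXPECT_TRUE(Fixture::run());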
test/unit/epilogue/threadblock/epilogue_volta_tensor_op.cu/0
{ "file_path": "test/unit/epilogue/threadblock/epilogue_volta_tensor_op.cu", "repo_id": "test", "token_count": 27577 }
56
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide Rank 2k update interface */ #pragma once #include <iostream> #include <fstream> #include <sstream> #include "../../common/cutlass_unit_test.h" #include "cutlass/blas3.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/distribution.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/reference/host/rank_2k.h" #include "cutlass/util/reference/host/rank_2k_complex.h" #include "testbed_utils.h" namespace test { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Rank2K> struct TestbedRank2KUniversal { using ElementA = typename Rank2K::ElementA; using ElementB = typename Rank2K::ElementB; using ElementC = typename Rank2K::ElementC; using ElementAccumulator = typename Rank2K::ElementAccumulator; using ElementCompute = typename Rank2K::Rank2Kkernel::Epilogue::OutputOp::ElementCompute; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint64_t seed; cutlass::HostTensor<typename Rank2K::ElementA, typename Rank2K::LayoutA> tensor_A; cutlass::HostTensor<typename Rank2K::ElementB, typename Rank2K::LayoutB> tensor_B; cutlass::HostTensor<typename Rank2K::ElementC, typename Rank2K::LayoutC> tensor_C; cutlass::HostTensor<typename Rank2K::ElementC, typename Rank2K::LayoutC> tensor_D; cutlass::HostTensor<typename Rank2K::ElementC, typename Rank2K::LayoutC> reference_D; // // Methods // TestbedRank2KUniversal( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed, int mantissa_in_bits) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Rank2K::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillRandomUniform( view, seed, scope_max, scope_min, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential( view.data(), view.capacity()); } else { EXPECT_TRUE(false) << "Input distribution not implemented"; return false; } return true; } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_symmetric_tensor( cutlass::TensorView<Element, Layout> view, 
cutlass::Distribution::Kind dist_kind, uint64_t seed, int mantissa_in_bits) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Rank2K::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillSymmetricRandomUniform( view, seed, Rank2K::kFillModeC, scope_max, scope_min, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillSymmetricRandomGaussian( view, seed, Rank2K::kFillModeC, 0, 0.5, mantissa_in_bits); } else { EXPECT_TRUE(false) << "Input distribution (symmetric tensor) not implemented"; return false; } return true; } /// Initializes data structures void initialize(cutlass::gemm::GemmCoord problem_size) { // // Allocate the Rank2K workspace // tensor_A.resize(problem_size.mk()); tensor_B.resize(problem_size.mk()); tensor_C.resize(problem_size.mn()); tensor_D.resize(problem_size.mn()); reference_D.resize(problem_size.mn(), false); EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019, cutlass::MantissaInBits<typename Rank2K::ElementA>::bits)); EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018, cutlass::MantissaInBits<typename Rank2K::ElementB>::bits)); EXPECT_TRUE(initialize_symmetric_tensor(tensor_C.host_view(), init_C, seed + 2017, cutlass::MantissaInBits<typename Rank2K::ElementC>::bits)); // It is possible to randomly initialize to all zeros, so override this with non-zeros // in the upper left corner of each operand. 
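// Otherwise the EXPECT_GT(TensorNorm(...), 0) checks in compare_reference() below could fail
// spuriously for small problems whose random fill happens to draw all zeros, even though the
// kernel under test is correct.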
tensor_A.host_view().at({0, 0}) = typename Rank2K::ElementA(1); tensor_B.host_view().at({0, 0}) = typename Rank2K::ElementB(1); tensor_C.host_view().at({0, 0}) = typename Rank2K::ElementC(1); cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view()); tensor_A.sync_device(); tensor_B.sync_device(); tensor_C.sync_device(); tensor_D.sync_device(); } /// Compares computed reference with device reference and outputs to a file if incorrect bool compare_reference( cutlass::gemm::GemmCoord problem_size, ElementCompute alpha, ElementCompute beta) { tensor_D.sync_host(); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); if (tensor_D.size() > 1) EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); if (reference_D.size() > 1) EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); double l2_norm = cutlass::reference::host::TensorRelativeErrorMetric(reference_D.host_view(), tensor_D.host_view()); bool passed = l2_norm < cutlass::MantissaInBits<typename Rank2K::ElementA>::error; return passed; } /// Verifies the result is a Rank2K bool verify( cutlass::gemm::GemmCoord problem_size, ElementCompute alpha, ElementCompute beta) { // // Verify // cutlass::reference::host::Rank2KComplex< typename Rank2K::ElementA, typename Rank2K::LayoutA, typename Rank2K::ElementB, typename Rank2K::LayoutB, typename Rank2K::ElementC, typename Rank2K::LayoutC, ElementCompute, ElementAccumulator >( problem_size, alpha, tensor_A.host_ref(), Rank2K::kTransformA, tensor_B.host_ref(), Rank2K::kTransformB, beta, tensor_C.host_ref(), reference_D.host_ref(), ElementAccumulator(0), Rank2K::kFillModeC, Rank2K::kBlasMode ); return compare_reference(problem_size, alpha, beta); } /// Returns true if the CUDA device is sufficient to execute the kernel. bool sufficient() const { // // Determine SMEM requirements and waive if not satisfied // size_t smem_size = sizeof(typename Rank2K::Rank2Kkernel::SharedStorage); cudaDeviceProp properties; int device_idx; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDevice() API call failed."); } result = cudaGetDeviceProperties(&properties, device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProperties() failed"); } if (properties.sharedMemPerBlockOptin < smem_size) { return false; } return true; } /// Executes one test bool run( cutlass::gemm::GemmUniversalMode mode, cutlass::gemm::GemmCoord problem_size, int batch_count = 1, ElementCompute alpha = ElementCompute(1), ElementCompute beta = ElementCompute(0)) { // Waive test if insufficient CUDA device if (!sufficient()) { if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; } return true; } #if 0 std::cout << "[TestbedRank2KUniversal::run()] problem(m, n, k): " << problem_size << " alpha: " << ElementCompute(alpha) << " beta: " << ElementCompute(beta) << std::endl; #endif this->initialize(problem_size); // // Initialize the Rank2K operator // typename Rank2K::Arguments arguments{ mode, problem_size, batch_count, {alpha, beta}, tensor_A.device_data(), tensor_B.device_data(), tensor_C.device_data(), tensor_D.device_data(), problem_size.n() * problem_size.k(), problem_size.n() * problem_size.k(), problem_size.m() * problem_size.n(), problem_size.m() * problem_size.n(), tensor_A.layout().stride(0), tensor_B.layout().stride(0), tensor_C.layout().stride(0), tensor_D.layout().stride(0) }; Rank2K rank2k_op; size_t workspace_size = Rank2K::get_workspace_size(arguments); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); cutlass::Status status = rank2k_op.initialize(arguments, workspace.get()); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Run the Rank2K // status = rank2k_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Verify // bool passed = this->verify(problem_size, alpha, beta); //if (true) { if (!passed) { std::stringstream fname; fname << "error_Rank2k_device_" << "fill_mode_c_" << (Rank2K::kFillModeC == cutlass::FillMode::kLower ? "lower_" : (Rank2K::kFillModeC == cutlass::FillMode::kUpper ? "upper_" : "invalid_")) << "mnk_" << problem_size.m() << "x" << problem_size.n() << "x" << problem_size.k() << "_" << Rank2K::ThreadblockShape::kM << "x" << Rank2K::ThreadblockShape::kN << "x" << Rank2K::ThreadblockShape::kK << "_" << Rank2K::WarpShape::kM << "x" << Rank2K::WarpShape::kN << "x" << Rank2K::WarpShape::kK << ".txt"; std::cout << fname.str() << std::endl; std::ofstream results(fname.str()); results << problem_size << std::endl; results << "\nA:\n" << tensor_A.host_view() << "\n" << "\nB:\n" << tensor_B.host_view() << "\n" << "\nC:\n" << tensor_C.host_view() << "\n" << "\nD reference:\n" << reference_D.host_view() << "\n" << "\nD computed:\n" << tensor_D.host_view() << "\n"; } return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Rank2K> bool TestRank2kUniversal( cutlass::gemm::GemmCoord const & problem_size, cutlass::gemm::GemmUniversalMode mode, int batch_count, double alpha = 1.0, double beta = 2.0) { bool passed = true; TestbedRank2KUniversal<Rank2K> testbed; using ElementCompute = typename Rank2K::EpilogueOutputOp::ElementCompute; passed = testbed.run( mode, problem_size, batch_count, cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta) ); return passed; } template <typename Rank2K> bool TestAllRank2KUniversal() { bool passed = true; int const kMinimumOperandElementSize = int(cutlass::sizeof_bits<typename Rank2K::ElementA>::value); int const kAlignment = cutlass::platform::is_same< typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize; // int8_t gemm alignment constraints int const kAlignmentM = cutlass::platform::is_same<typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Rank2K::ElementA, int8_t>::value && cutlass::platform::is_same<typename Rank2K::LayoutA, cutlass::layout::ColumnMajor>::value ? 
4 : kAlignment; int const kAlignmentN = kAlignmentM; int const kAlignmentK = cutlass::platform::is_same<typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Rank2K::ElementA, int8_t>::value && cutlass::platform::is_same<typename Rank2K::LayoutA, cutlass::layout::RowMajor>::value ? 4 : kAlignment; cutlass::gemm::GemmUniversalMode modes[] = { cutlass::gemm::GemmUniversalMode::kGemm, }; int problem_size_n[] = { kAlignmentN, 512 - 2*kAlignmentN }; int problem_size_k[] = { kAlignmentK, Rank2K::ThreadblockShape::kK * Rank2K::kStages - kAlignmentK, Rank2K::ThreadblockShape::kK * Rank2K::kStages * 3 - kAlignmentK }; int batch_counts[] = { // may be interpretted as batch count or split-K slices 1 // Just running one batch for now (removing 2, 3, 5, 7) }; double problem_alpha[] = { 1.0, 3.25 }; double problem_beta[] = { 0.0, 2.15 }; using ElementCompute = typename Rank2K::EpilogueOutputOp::ElementCompute; for (cutlass::gemm::GemmUniversalMode mode : modes) { for (int n : problem_size_n) { for (int k : problem_size_k) { for (int batch_count : batch_counts) { for (auto alpha : problem_alpha) { for (auto beta : problem_beta) { if (mode == cutlass::gemm::GemmUniversalMode::kGemm || mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) { // skip very small K problems //if (k / batch_count < 2 * Rank2K::ThreadblockShape::kK) { // continue; //} } cutlass::gemm::GemmCoord problem_size(n, n, k); TestbedRank2KUniversal<Rank2K> testbed; passed = testbed.run( mode, problem_size, batch_count, cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta) ); if (!passed) { return false; } } } } } } } return passed; } template <typename Rank2K> bool TestAllRank2KHermitianUniversal() { bool passed = true; using ElementCompute = typename Rank2K::EpilogueOutputOp::ElementCompute; using ElementAccumulator = typename Rank2K::ElementAccumulator; int const kMinimumOperandElementSize = int(cutlass::sizeof_bits<typename Rank2K::ElementA>::value); int const kAlignment = cutlass::platform::is_same< typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize; // int8_t gemm alignment constraints int const kAlignmentM = cutlass::platform::is_same<typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Rank2K::ElementA, int8_t>::value && cutlass::platform::is_same<typename Rank2K::LayoutA, cutlass::layout::ColumnMajor>::value ? 4 : kAlignment; int const kAlignmentN = kAlignmentM; int const kAlignmentK = cutlass::platform::is_same<typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Rank2K::ElementA, int8_t>::value && cutlass::platform::is_same<typename Rank2K::LayoutA, cutlass::layout::RowMajor>::value ? 
4 : kAlignment; cutlass::gemm::GemmUniversalMode modes[] = { cutlass::gemm::GemmUniversalMode::kGemm, }; int problem_size_n[] = { kAlignmentN, 512 - 2*kAlignmentN }; int problem_size_k[] = { kAlignmentK, Rank2K::ThreadblockShape::kK * Rank2K::kStages - kAlignmentK, Rank2K::ThreadblockShape::kK * Rank2K::kStages * 3 - kAlignmentK }; int batch_counts[] = { // may be interpretted as batch count or split-K slices 1 // Just running one batch for now (removing 2, 3, 5, 7) }; /* Complex alpha for HER2K */ ElementAccumulator problem_alpha[] = { {1.0}, {1.25, 3.25}, {-0.25, -2.25} }; ElementAccumulator problem_beta[] = { 0.0, -2.25 }; for (cutlass::gemm::GemmUniversalMode mode : modes) { for (int n : problem_size_n) { for (int k : problem_size_k) { for (int batch_count : batch_counts) { for (auto alpha : problem_alpha) { for (auto beta : problem_beta) { if (mode == cutlass::gemm::GemmUniversalMode::kGemm || mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) { // skip very small K problems //if (k / batch_count < 2 * Rank2K::ThreadblockShape::kK) { // continue; //} } cutlass::gemm::GemmCoord problem_size(n, n, k); TestbedRank2KUniversal<Rank2K> testbed; passed = testbed.run( mode, problem_size, batch_count, alpha, beta ); if (!passed) { return false; } } } } } } } return passed; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace gemm } // namespace test /////////////////////////////////////////////////////////////////////////////////////////////////
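// Worked example of the alignment arithmetic above, assuming a tensor-op (non-SIMT) kernel with
// half_t operands: kMinimumOperandElementSize is 16, so kAlignment = 128 / 16 = 8 elements, and
// the sweep runs n in {8, 512 - 2*8 = 496} with k equal to kAlignmentK and to values just under
// one and three times ThreadblockShape::kK * kStages. SIMT kernels use kAlignment = 1, and int8
// SIMT kernels with the layouts singled out above are further constrained to 4.
//
// Illustrative usage of the single-problem helper (disabled, mirroring the #if 0 debug block used
// earlier in this file): `Rank2kOp` is a placeholder for a device-level Rank2K instantiation
// supplied by the including test, and the TEST name is made up; neither is defined in this header.
#if 0
TEST(Device_Rank2k_example, illustrative_usage) {
  cutlass::gemm::GemmCoord problem(136, 136, 64);   // rank-2k updates use m == n

  bool passed = test::gemm::device::TestRank2kUniversal<Rank2kOp>(
    problem,
    cutlass::gemm::GemmUniversalMode::kGemm,
    /*batch_count=*/1,
    /*alpha=*/1.0,
    /*beta=*/2.0);

  EXPECT_TRUE(passed);
}
#endif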
test/unit/gemm/device/testbed_rank2k_universal.h/0
{ "file_path": "test/unit/gemm/device/testbed_rank2k_universal.h", "repo_id": "test", "token_count": 8644 }
57
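Editor's note: the alignment bookkeeping at the top of TestAllRank2KUniversal is easy to misread. The short sketch below spells out the same arithmetic for two representative cases; it is an illustration only, not part of the test file above, and the chosen element types (half_t with tensor ops, int8_t with SIMT) plus the include path are assumptions for the example.

// Illustrative only: mirrors the kAlignment / kAlignmentM computation used by
// TestAllRank2KUniversal. Include path follows CUTLASS conventions and may need adjusting.
#include "cutlass/numeric_types.h"   // cutlass::half_t, cutlass::sizeof_bits

// Tensor-op kernels require 128-bit aligned accesses:
// kAlignment = 128 / sizeof_bits<ElementA> = 128 / 16 = 8 elements for half_t.
constexpr int kAlignmentTensorOpF16 =
    128 / int(cutlass::sizeof_bits<cutlass::half_t>::value);
static_assert(kAlignmentTensorOpF16 == 8, "one 128-bit access holds 8 half_t elements");

// SIMT kernels default to an alignment of one element, except the int8 path,
// which the testbed bumps to 4 so that four 8-bit values fill one 32-bit word.
constexpr int kAlignmentSimtInt8 = 4;
static_assert(kAlignmentSimtInt8 * 8 == 32, "four int8_t values per 32-bit access");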
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "mma_pipelined_testbed.h" ///////////////////////////////////////////////////////////////////////////////////////////////// // sgemm_NT ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_sgemm, sgemm_nt_32x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass, 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_sgemm, sgemm_nt_64x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_sgemm, sgemm_nt_32x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_sgemm, sgemm_nt_64x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 128, 16); float 
alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_sgemm, sgemm_nt_128x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } ///////////////////////////////////////////////////////////////////////////////////////////////// // dgemm_NN ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_dgemm, dgemm_nt_32x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_dgemm, dgemm_nt_64x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_dgemm, dgemm_nt_32x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, 
cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_dgemm, dgemm_nt_64x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 128, 16); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_dgemm, dgemm_nt_128x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } ///////////////////////////////////////////////////////////////////////////////////////////////// // igemm_NN ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_igemm, igemm_nt_32x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_igemm, igemm_nt_64x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, 
cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_igemm, igemm_nt_32x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_igemm, igemm_nt_64x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 128, 16); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_igemm, igemm_nt_128x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } ///////////////////////////////////////////////////////////////////////////////////////////////// // hgemm_NN ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_hgemm, hgemm_nt_32x64x8_32x64x1) { using MmaCore = 
typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_hgemm, hgemm_nt_64x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_hgemm, hgemm_nt_32x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_hgemm, hgemm_nt_64x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 128, 16); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, 
cutlass::Distribution::Uniform); } TEST(SM50_hgemm, hgemm_nt_128x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } ///////////////////////////////////////////////////////////////////////////////////////////////// // igemm_NT DP4A ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM61_igemm, igemm_int8_nt_64x64x16_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_64x64x32_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 32>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 32>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 4096); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_64x64x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 
32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_128x64x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_128x128x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_256x128x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<256, 256, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<128, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(256, 256, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_128x256x64_64x64x16) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 256, 64>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord 
problem_size(128, 256, 64); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_256x128x64_64x64x16) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<256, 128, 64>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(256, 128, 64); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_64x64x16_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_64x64x32_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 32>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 32>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 4096); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_64x64x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; 
cutlass::gemm::GemmCoord problem_size(64, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_128x64x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_128x128x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_256x128x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<256, 256, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<128, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(256, 256, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_128x256x64_64x64x16) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 256, 64>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, 
cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 256, 64); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_256x128x64_64x64x16) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<256, 128, 64>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(256, 128, 64); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nn_64x64x16_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); }
test/unit/gemm/threadblock/mma_pipelined_simt.cu/0
{ "file_path": "test/unit/gemm/threadblock/mma_pipelined_simt.cu", "repo_id": "test", "token_count": 25788 }
58
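Editor's note: in the threadblock-level tests above, the launch configuration dim3 block(32, y, 1) always carries one warp per y slice, and y equals the warp count implied by the threadblock/warp tile ratio. The fragment below restates that bookkeeping for the 128x128x8 / 32x64x8 case; it is an illustration, not code from the test file.

// Illustrative only: how the block dimensions in the SM50 SIMT tests relate to the
// tile shapes. For ThreadblockShape 128x128x8 and WarpShape 32x64x8 the threadblock
// carries (128/32) * (128/64) = 8 warps, hence dim3 block(32, 8, 1) at launch.
#include "cutlass/gemm/gemm.h"   // cutlass::gemm::GemmShape

using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape        = cutlass::gemm::GemmShape<32, 64, 8>;

constexpr int kWarpsM        = ThreadblockShape::kM / WarpShape::kM;  // 4
constexpr int kWarpsN        = ThreadblockShape::kN / WarpShape::kN;  // 2
constexpr int kWarpsPerBlock = kWarpsM * kWarpsN;                     // 8

static_assert(kWarpsPerBlock == 8, "matches dim3 block(32, 8, 1) used by the test");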
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file
    \brief Unit tests for tensor layouts
*/

#include "../common/cutlass_unit_test.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"

/////////////////////////////////////////////////////////////////////////////////////////////////

namespace test {
namespace layout {

void test_NHWC_layout(int n_size, int h_size, int w_size, int c_size) {

  int ldc = c_size + 1;
  int ldw = ldc * (w_size + 2);
  int ldh = ldw * (h_size + 3);

  cutlass::layout::TensorNHWC::Stride tensor_stride({ ldc, ldw, ldh });

  cutlass::layout::TensorNHWC tensor_nhwc(tensor_stride);

  // test pointer offset
  for (int n_idx = 0; n_idx < n_size; n_idx++) {
    for (int h_idx = 0; h_idx < h_size; h_idx++) {
      for (int w_idx = 0; w_idx < w_size; w_idx++) {
        for (int c_idx = 0; c_idx < c_size; c_idx++) {
          cutlass::Tensor4DCoord tensor_coord(n_idx, h_idx, w_idx, c_idx);
          auto ptr_offset = tensor_nhwc(tensor_coord);
          decltype(ptr_offset) reference_offset = c_idx + w_idx * ldc + h_idx * ldw + n_idx * ldh;
          EXPECT_EQ(ptr_offset, reference_offset);
        }
      }
    }
  }

  // test stride
  auto stride = tensor_nhwc.stride();
  EXPECT_EQ(stride, tensor_stride);

  // test capacity
  auto capacity = tensor_nhwc.capacity(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size));
  decltype(capacity) reference_capacity = ldh * n_size;
  EXPECT_EQ(capacity, reference_capacity);

  // test packed
  auto packed_tensor_layout = tensor_nhwc.packed(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size));
  auto packed_stride = packed_tensor_layout.stride();
  EXPECT_EQ(packed_stride, cutlass::layout::TensorNHWC::Stride({ c_size, w_size * c_size, h_size * w_size * c_size }));
}

void test_NCHW_layout(int n_size, int c_size, int h_size, int w_size) {

  int ldw = w_size + 1;
  int ldh = ldw * (h_size + 2);
  int ldc = ldh * (c_size + 1);

  cutlass::layout::TensorNCHW::Stride tensor_stride({ ldw, ldh, ldc });

  cutlass::layout::TensorNCHW tensor_nchw(tensor_stride);

  // test pointer offset
  for (int n_idx = 0; n_idx < n_size; n_idx++) {
    for (int c_idx = 0; c_idx < c_size; c_idx++) {
      for (int h_idx = 0; h_idx < h_size; h_idx++) {
        for (int w_idx = 0; w_idx < w_size; w_idx++) {
          // Tensor4DCoord is always created in NHWC order
          cutlass::Tensor4DCoord tensor_coord(n_idx, h_idx, w_idx, c_idx);
          auto ptr_offset = tensor_nchw(tensor_coord);
          decltype(ptr_offset) reference_offset = w_idx + h_idx * ldw + c_idx * ldh + n_idx * ldc;
          EXPECT_EQ(ptr_offset, reference_offset);
        }
      }
    }
  }

  // test stride
  auto stride = tensor_nchw.stride();
  EXPECT_EQ(stride, tensor_stride);

  // test capacity
  auto capacity = tensor_nchw.capacity(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size));
  decltype(capacity) reference_capacity = ldc * n_size;
  EXPECT_EQ(capacity, reference_capacity);

  // test packed
  auto packed_tensor_layout = tensor_nchw.packed(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size));
  auto packed_stride = packed_tensor_layout.stride();
  EXPECT_EQ(packed_stride, cutlass::layout::TensorNHWC::Stride({ w_size, w_size * h_size, w_size * h_size * c_size }));
}

} // namespace layout
} // namespace test

/////////////////////////////////////////////////////////////////////////////////////////////////

TEST(Layout_Tensor, NHWC_32_12_10_14) {
  int n_size = 32;
  int h_size = 12;
  int w_size = 10;
  int c_size = 14;
  test::layout::test_NHWC_layout(n_size, h_size, w_size, c_size);
}

/////////////////////////////////////////////////////////////////////////////////////////////////

TEST(Layout_Tensor, NCHW_32_12_10_14) {
  int n_size = 32;
  int c_size = 12;
  int h_size = 10;
  int w_size = 14;
  test::layout::test_NCHW_layout(n_size, c_size, h_size,
w_size); } /////////////////////////////////////////////////////////////////////////////////////////////////
test/unit/layout/tensor.cu/0
{ "file_path": "test/unit/layout/tensor.cu", "repo_id": "test", "token_count": 2305 }
59
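Editor's note: the NHWC offset arithmetic verified above reduces to offset(n, h, w, c) = c + w * stride[0] + h * stride[1] + n * stride[2]. The short host-side check below restates it with concrete numbers; it is a standalone illustration using the same public layout API, not part of the unit test.

// Illustrative only: a packed NHWC layout for a 2x4x4x8 tensor has strides
// {C, W*C, H*W*C} = {8, 32, 128}, so element (n=1, h=2, w=3, c=5) lives at
// 5 + 3*8 + 2*32 + 1*128 = 221.
#include <cassert>
#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"

int main() {
  auto layout = cutlass::layout::TensorNHWC::packed(cutlass::Tensor4DCoord(2, 4, 4, 8));
  auto stride = layout.stride();
  assert(stride[0] == 8 && stride[1] == 32 && stride[2] == 128);
  assert(layout(cutlass::Tensor4DCoord(1, 2, 3, 5)) == 221);
  return 0;
}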
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit test for the PipelineTmaAsync class */ #define KERNEL_DBG_TRACE false #include "../common/cutlass_unit_test.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> #include <cute/arch/cluster_sm90.hpp> #include <cutlass/util/reference/host/gemm.h> #include <cutlass/cluster_launch.hpp> #include "cutlass/core_io.h" #include "cutlass/util/print_error.hpp" #include "cutlass/util/GPU_Clock.hpp" #include "testbed.h" #include "cutlass/pipeline/pipeline.hpp" #include "cutlass/arch/barrier.h" #include "cute/arch/cluster_sm90.hpp" using namespace cute; //////////////////// KERNEL ///////////////////////// template <uint32_t Stages> struct SharedStorage { typename cutlass::PipelineTmaAsync<Stages>::SharedStorage storage; }; // Goal of this kernel is to complete deadlock-free template <class ClusterShape, uint32_t NumStages> __global__ static void pipeline_device(uint32_t const NumIterations) { extern __shared__ char shared_memory[]; using MainloopPipeline = cutlass::PipelineTmaAsync<NumStages>; using PipelineState = cutlass::PipelineState<NumStages>; using SharedStorage = SharedStorage<NumStages>; SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory); [[maybe_unused]] auto cta_layout = Layout<ClusterShape>{}; // (m,n) -> cta_id int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); int warp_group_thread_idx = threadIdx.x % 128; dim3 block_id_in_cluster = cute::block_id_in_cluster(); auto cluster_shape = ClusterShape{}; // #Producers = #RowsInCluster + #ColsInCluster - 1 uint32_t const NumProducers = cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1; uint32_t const TmaTransactionBytes = sizeof(uint32_t) * NumProducers; uint32_t const per_cta_bytes = sizeof(uint32_t); // mbarrier.init typename MainloopPipeline::Params params; params.transaction_bytes = TmaTransactionBytes; params.role = MainloopPipeline::ThreadCategory::ProducerConsumer; params.is_leader = warp_group_thread_idx == 0; params.num_consumers = 128; MainloopPipeline pipeline(shared_storage.storage, params, cluster_shape); __syncthreads(); // Ensure All CTAs in Cluster have completed init before issuing commits cute::cluster_arrive_relaxed(); cute::cluster_wait(); // Total number of gemm_k_iterations auto mma_k_iterations = NumIterations; auto tma_k_iterations = NumIterations; PipelineState smem_pipe_read; // For the DMA (prologue) - we start with an opposite phase - since we skip all waits // i.e., we know that the buffer is indeed empty PipelineState smem_pipe_write = cutlass::make_producer_start_state<MainloopPipeline>(); PipelineState smem_pipe_release; int K_TILE_MMAS = 1; int lane_predicate = cute::elect_one_sync(); int k_pipe_tma_prologue = min(NumStages, tma_k_iterations); // DMA Prologue (Loads) CUTLASS_PRAGMA_UNROLL for(int i = 0; i < k_pipe_tma_prologue; ++i) { pipeline.producer_acquire(smem_pipe_write); // cp.async.bulk.tensor would typically happen here pipeline.producer_commit(smem_pipe_write, per_cta_bytes); ++smem_pipe_write; } tma_k_iterations -= k_pipe_tma_prologue; // MMA Prologue (Compute) - modeling inflight MMAs for (int iter = 0; iter < K_TILE_MMAS; ++iter) { pipeline.consumer_wait(smem_pipe_read); warpgroup_arrive(); // GMMA would typically happen here ++smem_pipe_read; } mma_k_iterations -= K_TILE_MMAS; CUTLASS_PRAGMA_NO_UNROLL for (int iter = 0; iter < mma_k_iterations; ++iter) { pipeline.consumer_wait(smem_pipe_read); warpgroup_arrive(); // GMMA would typically happen here 
pipeline.consumer_release(smem_pipe_release); if (lane_predicate && (warp_idx == 0) && (tma_k_iterations > 0)) { pipeline.producer_acquire(smem_pipe_write); // cp.async.bulk.tensor would typically happen here pipeline.producer_commit(smem_pipe_write, per_cta_bytes); ++smem_pipe_write; --tma_k_iterations; } // next read stage ++smem_pipe_read; ++smem_pipe_release; } // To make sure remote SMEM doesn't get destoryed cute::cluster_arrive(); cute::cluster_wait(); } ///////////////////////////////////////////////////// /// Device NT GMMA + TMA specialized template<uint32_t Stages_, typename ClusterShape_> struct PipelineTest { // // Data members // static constexpr uint32_t Stages = Stages_; static constexpr uint32_t kBlockSize = 128; using ClusterShape = ClusterShape_; // // Methods // // Ctor PipelineTest(){}; // Run CuTe GEMM kernel cudaError_t run(uint32_t const kNumIters, cudaStream_t stream = 0) { float elapsed_ms = 0.0f; // Pipeline (multistage pipeline) [[maybe_unused]] auto num_stages = Int<Stages>{}; auto cluster_shape = Shape<Int<ClusterShape::kM>, Int<ClusterShape::kN>, _1>{}; // // Configure and launch // int iterations = 1; cudaEvent_t events[2]; cudaError_t result; for (cudaEvent_t & event : events) { result = cudaEventCreate(&event); if (result != cudaSuccess) { std::cerr << "Error: Failed to create event."; return result; } } result = cudaEventRecord(events[0]); if (result != cudaSuccess) { std::cerr << "Error: Failed to record start event."; return result; } for (int iter = 0; iter < iterations; ++iter) { int smem_size = int(sizeof(SharedStorage<Stages>)); result = cudaFuncSetAttribute( pipeline_device<decltype(cluster_shape), Stages>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); // Launch a single Cluster, with 128 thread per CTA dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1); dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1); dim3 dimBlock(kBlockSize,1,1); const void* kernel = (const void*)pipeline_device<decltype(cluster_shape), Stages>; int iters = kNumIters; void* kernel_params[] = {reinterpret_cast<void*>(&iters)}; cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params); } // profiling loop ends result = cudaEventRecord(events[1]); if (result != cudaSuccess) { std::cerr << "Error: Failed to record stop event."; return result; } result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl; return result; } result = cudaEventElapsedTime(&elapsed_ms, events[0], events[1]); if (result != cudaSuccess) { std::cerr << "Failed to create event."; return result; } for (cudaEvent_t & event : events) { (void)cudaEventDestroy(event); } return cudaSuccess; } }; #if CUDA_12_0_SM90_FEATURES_SUPPORTED TEST(SM90_Verify_PipelineTmaAsync, Cluster1x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x1_Stage5) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 5; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x1_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 
10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x2_Stage5) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 5; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x2_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x4_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; static constexpr uint32_t Stages = 10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x2_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x1_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x1_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); 
EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } #endif
test/unit/pipeline/pipeline_tma_async.cu/0
{ "file_path": "test/unit/pipeline/pipeline_tma_async.cu", "repo_id": "test", "token_count": 5494 }
60
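Editor's note: the kernel above exercises the four-call handshake every PipelineTmaAsync user follows. The annotated fragment below isolates that steady-state pattern; it is a schematic restatement of the loop already shown, not an additional test, and assumes the same pipeline, pipeline-state, and iteration variables set up in pipeline_device().

// Illustrative only: steady-state producer/consumer handshake of the TMA pipeline.
for (int iter = 0; iter < mma_k_iterations; ++iter) {
  pipeline.consumer_wait(smem_pipe_read);        // wait until the producer filled this stage
  /* consume the stage: GMMA math would be issued here */
  pipeline.consumer_release(smem_pipe_release);  // hand the stage back to the producer

  if (lane_predicate && warp_idx == 0 && tma_k_iterations > 0) {
    pipeline.producer_acquire(smem_pipe_write);  // block until the stage is free again
    /* refill the stage: cp.async.bulk.tensor would be issued here */
    pipeline.producer_commit(smem_pipe_write, per_cta_bytes);
    ++smem_pipe_write;
    --tma_k_iterations;
  }

  ++smem_pipe_read;                              // advance both consumer-side states
  ++smem_pipe_release;
}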
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Manifest of CUTLASS Library This is the root of the data structure containing CUTLASS objects */ #pragma once #include <list> #include <memory> #include <map> /////////////////////////////////////////////////////////////////////////////////////////////////// #include "library.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { /////////////////////////////////////////////////////////////////////////////////////////////////// // Forward declaration class Manifest; // init and insert all cutlass gemm operations in manifest object (procedurally generated using generator.py) void initialize_all(Manifest &manifest); // init and insert all reduction op in manifest object (manually instantiated in library/reduction) void initialize_all_reduction_op(Manifest &manifest); ///////////////////////////////////////////////////////////////////////////////////////////////////////// /// List of operations using OperationVector = std::vector<std::unique_ptr<Operation>>; /////////////////////////////////////////////////////////////////////////////////////////////////// /// Manifest of CUTLASS Library class Manifest { private: /// Operation provider Provider provider_; /// Global list of operations OperationVector operations_; public: Manifest (Provider provider = library::Provider::kCUTLASS) : provider_(provider) { } /// Top-level initialization Status initialize(); /// Used for initialization void reserve(size_t operation_count); /// Graceful shutdown Status release(); /// Appends an operation and takes ownership void append(Operation *operation_ptr) {\ // This function is inline s.t. 
it is present in generated libraries // without having to compile or link in manifest.cpp operations_.emplace_back(operation_ptr); } /// Returns an iterator to the first operation OperationVector const &operations() const; /// Returns a const iterator OperationVector::const_iterator begin() const; /// Returns a const iterator OperationVector::const_iterator end() const; }; /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////////
tools/library/include/cutlass/library/manifest.h/0
{ "file_path": "tools/library/include/cutlass/library/manifest.h", "repo_id": "tools", "token_count": 964 }
61
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief */ #include <string> #include <stdexcept> #include <sstream> #include "cutlass/library/util.h" #include "cutlass/profiler/problem_space.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static T lexical_cast(std::string const &str) { std::stringstream ss; T value; ss << str; ss >> value; return value; } ///////////////////////////////////////////////////////////////////////////////////////////////// std::ostream & KernelArgument::ValueIterator::print(std::ostream &out) const { out << "[" << (void *)this << " " << argument->qualified_name() << "] "; if (this->null_argument) { out << "<null>"; } else { out << "<not null>"; } return out; } KernelArgument::~KernelArgument() { } ////////////////////////////////////////////////////////////////////////////////////////////////// ScalarArgument::ScalarValue::ScalarValue( std::string const &value_, ScalarArgument const *argument_, bool not_null_ ): KernelArgument::Value(argument_, not_null_), value(value_) { } std::ostream &ScalarArgument::ScalarValue::print(std::ostream &out) const { out << argument->qualified_name() << ": "; if (not_null) { out << value; } else { out << "<null>"; } return out; } ScalarArgument::ScalarValueIterator::ScalarValueIterator( ScalarArgument const *argument_ ): KernelArgument::ValueIterator(argument_) { if (argument_) { value_it = argument_->values.begin(); } } void ScalarArgument::ScalarValueIterator::operator++() { if (this->null_argument) { this->null_argument = false; } else { ++value_it; } } bool 
ScalarArgument::ScalarValueIterator::operator==(ValueIterator const &it) const { if (it.type() != ArgumentTypeID::kScalar) { throw std::runtime_error("Cannot compare ScalarValueIterator with iterator of different type"); } auto const & scalar_it = static_cast<ScalarValueIterator const &>(it); return value_it == scalar_it.value_it; } /// Gets the value pointed to std::unique_ptr<KernelArgument::Value> ScalarArgument::ScalarValueIterator::at() const { if (this->null_argument) { return std::unique_ptr<KernelArgument::Value>( new ScalarArgument::ScalarValue( std::string(), static_cast<ScalarArgument const *>(argument), false)); } else { return std::unique_ptr<KernelArgument::Value>( new ScalarArgument::ScalarValue( *value_it, static_cast<ScalarArgument const *>(argument))); } } std::unique_ptr<KernelArgument::ValueIterator> ScalarArgument::begin() const { return std::unique_ptr<KernelArgument::ValueIterator>(new ScalarValueIterator(this)); } std::unique_ptr<KernelArgument::ValueIterator> ScalarArgument::end() const { ScalarValueIterator *it = new ScalarValueIterator(this); it->value_it = this->values.end(); it->null_argument = false; return std::unique_ptr<ValueIterator>(it); } ////////////////////////////////////////////////////////////////////////////////////////////////// IntegerArgument::IntegerValue::IntegerValue( int64_t value_, IntegerArgument const *argument_, bool not_null_ ): KernelArgument::Value(argument_, not_null_), value(value_) { } /// Pretty printer for debugging std::ostream &IntegerArgument::IntegerValue::print(std::ostream &out) const { out << argument->qualified_name() << ": "; if (not_null) { out << value; } else { out << "<null>"; } return out; } IntegerArgument::IntegerValueIterator::IntegerValueIterator(IntegerArgument const *argument_): KernelArgument::ValueIterator(argument_) { if (argument_) { range_it = argument_->ranges.begin(); if (range_it != argument_->ranges.end()) { value_it = range_it->begin(); } } } void IntegerArgument::IntegerValueIterator::operator++() { if (this->null_argument) { this->null_argument = false; } else { ++value_it; if (value_it == range_it->end()) { ++range_it; if (range_it != static_cast<IntegerArgument const *>(argument)->ranges.end()) { value_it = range_it->begin(); } } } } bool IntegerArgument::IntegerValueIterator::operator==(ValueIterator const &it) const { if (it.type() != ArgumentTypeID::kInteger) { throw std::runtime_error("Cannot compare IntegerValueIterator with iterator of different type"); } auto const & integer_iterator = static_cast<IntegerValueIterator const &>(it); if (this->null_argument) { return it.null_argument; } else { if (range_it != integer_iterator.range_it) { return false; } if (range_it == static_cast<IntegerArgument const *>(argument)->ranges.end() && range_it == integer_iterator.range_it) { return true; } return value_it == integer_iterator.value_it; } } std::unique_ptr<KernelArgument::Value> IntegerArgument::IntegerValueIterator::at() const { if (this->null_argument) { return std::unique_ptr<KernelArgument::Value>( new IntegerArgument::IntegerValue( 0, static_cast<IntegerArgument const *>(argument), false)); } else { return std::unique_ptr<KernelArgument::Value>( new IntegerArgument::IntegerValue( *value_it, static_cast<IntegerArgument const *>(argument))); } } std::unique_ptr<KernelArgument::ValueIterator> IntegerArgument::begin() const { return std::unique_ptr<KernelArgument::ValueIterator>(new IntegerValueIterator(this)); } std::unique_ptr<KernelArgument::ValueIterator> IntegerArgument::end() const { 
IntegerValueIterator *it = new IntegerValueIterator(this); it->range_it = this->ranges.end(); it->null_argument = false; return std::unique_ptr<ValueIterator>(it); } ////////////////////////////////////////////////////////////////////////////////////////////////// TensorArgument::TensorValue::TensorValue( TensorDescription const &desc_, TensorArgument const *argument_, bool not_null_ ): KernelArgument::Value(argument_, not_null_), desc(desc_) { } /// Pretty printer for debugging std::ostream &TensorArgument::TensorValue::print(std::ostream &out) const { out << argument->qualified_name() << ": " << to_string(desc.element) << ": " << to_string(desc.layout); return out; } TensorArgument::TensorValueIterator::TensorValueIterator( TensorArgument const *argument_ ): KernelArgument::ValueIterator(argument_) { if (argument_) { value_it = argument_->values.begin(); } } void TensorArgument::TensorValueIterator::operator++() { if (this->null_argument) { this->null_argument = false; } else { ++value_it; } } bool TensorArgument::TensorValueIterator::operator==(ValueIterator const &it) const { if (it.type() != ArgumentTypeID::kTensor) { throw std::runtime_error("Cannot compare TensorValueIterator with iterator of different type"); } auto const & tensor_it = static_cast<TensorValueIterator const &>(it); return value_it == tensor_it.value_it; } /// Gets the value pointed to std::unique_ptr<KernelArgument::Value> TensorArgument::TensorValueIterator::at() const { if (this->null_argument) { return std::unique_ptr<KernelArgument::Value>( new TensorArgument::TensorValue( TensorDescription(), static_cast<TensorArgument const *>(argument), false)); } else { return std::unique_ptr<KernelArgument::Value>( new TensorArgument::TensorValue( *value_it, static_cast<TensorArgument const *>(argument))); } } std::unique_ptr<KernelArgument::ValueIterator> TensorArgument::begin() const { return std::unique_ptr<KernelArgument::ValueIterator>(new TensorValueIterator(this)); } std::unique_ptr<KernelArgument::ValueIterator> TensorArgument::end() const { TensorValueIterator *it = new TensorValueIterator(this); it->value_it = this->values.end(); it->null_argument = false; return std::unique_ptr<ValueIterator>(it); } ////////////////////////////////////////////////////////////////////////////////////////////////// EnumeratedTypeArgument::EnumeratedTypeValue::EnumeratedTypeValue( std::string const & element_, EnumeratedTypeArgument const *argument_, bool not_null_ ): KernelArgument::Value(argument_, not_null_), element(element_) { } /// Pretty printer for debugging std::ostream &EnumeratedTypeArgument::EnumeratedTypeValue::print(std::ostream &out) const { out << argument->qualified_name() << ": " << element; return out; } EnumeratedTypeArgument::EnumeratedTypeValueIterator::EnumeratedTypeValueIterator( EnumeratedTypeArgument const *argument_ ): KernelArgument::ValueIterator(argument_) { if (argument_) { value_it = argument_->values.begin(); } } void EnumeratedTypeArgument::EnumeratedTypeValueIterator::operator++() { if (this->null_argument) { this->null_argument = false; } else { ++value_it; } } bool EnumeratedTypeArgument::EnumeratedTypeValueIterator::operator==(ValueIterator const &it) const { if (it.type() != ArgumentTypeID::kEnumerated) { throw std::runtime_error("Cannot compare EnumeratedTypeValueIterator with iterator of different type"); } auto const & enumerated_type_it = static_cast<EnumeratedTypeValueIterator const &>(it); return value_it == enumerated_type_it.value_it; } /// Gets the value pointed to 
std::unique_ptr<KernelArgument::Value> EnumeratedTypeArgument::EnumeratedTypeValueIterator::at() const { if (this->null_argument) { return std::unique_ptr<KernelArgument::Value>( new EnumeratedTypeValue( std::string(), static_cast<EnumeratedTypeArgument const *>(argument), false)); } else { return std::unique_ptr<KernelArgument::Value>( new EnumeratedTypeValue( *value_it, static_cast<EnumeratedTypeArgument const *>(argument))); } } std::unique_ptr<KernelArgument::ValueIterator> EnumeratedTypeArgument::begin() const { return std::unique_ptr<KernelArgument::ValueIterator>(new EnumeratedTypeValueIterator(this)); } std::unique_ptr<KernelArgument::ValueIterator> EnumeratedTypeArgument::end() const { EnumeratedTypeValueIterator *it = new EnumeratedTypeValueIterator(this); it->value_it = this->values.end(); it->null_argument = false; return std::unique_ptr<ValueIterator>(it); } ////////////////////////////////////////////////////////////////////////////////////////////////// ProblemSpace::Iterator::Iterator() { } ProblemSpace::Iterator::Iterator(ProblemSpace const &problem_space) { for (auto const & arg_ptr : problem_space.arguments) { construct_(arg_ptr.get()); } } ProblemSpace::Iterator::Iterator(Iterator && it) { iterators = std::move(it.iterators); } /// Helper for recursively constructing iterators void ProblemSpace::Iterator::construct_(KernelArgument const *argument) { iterators.emplace_back(argument->begin()); } /// Given a set of ranges, iterate over the points within their Cartesian product. No big deal. void ProblemSpace::Iterator::operator++() { // Define a pair of iterator into the vector of iterators. IteratorVector::iterator iterator_it = iterators.begin(); IteratorVector::iterator next_iterator = iterator_it; // Advance the first argument. ++(**iterator_it); // Maintain a pair of iterators over consecutive arguments. ++next_iterator; // Carry logic while (next_iterator != iterators.end() && **iterator_it == *((*iterator_it)->argument->end())) { // Did an iterator reach the end of its range? (*iterator_it) = (*iterator_it)->argument->begin(); // Reset that iterator, ++(**next_iterator); // and increment the next argument's iterator. iterator_it = next_iterator; // Advance to the next argument ++next_iterator; } } /// Moves iterator to end void ProblemSpace::Iterator::move_to_end() { if (!iterators.empty()) { std::unique_ptr<KernelArgument::ValueIterator> new_iter = iterators.back()->argument->end(); std::swap(iterators.back(), new_iter); } } ProblemSpace::Problem ProblemSpace::Iterator::at() const { Problem problem; for (std::unique_ptr<KernelArgument::ValueIterator> const & it : iterators) { problem.emplace_back(it->at()); } return problem; } /// Equality operator bool ProblemSpace::Iterator::operator==(Iterator const &it) const { // This would be an opportunity for auto, but explicitly denoting references to // owning smart pointers to dynamic polymorphic objects seems like a kindness to the reader. 
IteratorVector::const_iterator first_it = iterators.begin(); IteratorVector::const_iterator second_it = it.iterators.begin(); int idx = 0; for (; first_it != iterators.end(); ++first_it, ++second_it, ++idx) { KernelArgument::ValueIterator const *my_it = first_it->get(); KernelArgument::ValueIterator const *their_it = second_it->get(); if (*my_it != *their_it) { return false; } } return true; } std::ostream &ProblemSpace::Iterator::print(std::ostream &out) const { for (std::unique_ptr<KernelArgument::ValueIterator> const & iter_ptr : iterators) { out << " [iter " << (iter_ptr->null_argument ? "null" : "<not null>") << ", type: " << to_string(iter_ptr->argument->description->type) << "]" << std::endl; } return out; } ///////////////////////////////////////////////////////////////////////////////////////////////// ProblemSpace::ProblemSpace(ArgumentDescriptionVector const &schema, CommandLine const &cmdline) { // Clone the arguments for (ArgumentDescription const & arg_desc : schema) { clone_(arguments, &arg_desc); } // Parse values from the command line for (auto & arg : arguments) { parse_(arg.get(), cmdline); } } /// Returns the index of an argument by name size_t ProblemSpace::argument_index(char const *name) const { return argument_index_map.at(name); } /// Helper for recursively cloning void ProblemSpace::clone_( KernelArgumentVector &kernel_args, ArgumentDescription const *arg_desc) { KernelArgument *kernel_arg = nullptr; switch (arg_desc->type) { case ArgumentTypeID::kScalar: kernel_arg = new ScalarArgument(arg_desc); break; case ArgumentTypeID::kInteger: kernel_arg = new IntegerArgument(arg_desc); break; case ArgumentTypeID::kTensor: kernel_arg = new TensorArgument(arg_desc); break; case ArgumentTypeID::kStructure: { throw std::runtime_error("ArgumentTypeID::kStructure not supported"); } break; case ArgumentTypeID::kEnumerated: kernel_arg = new EnumeratedTypeArgument(arg_desc); break; default: break; } if (kernel_arg) { size_t idx = kernel_args.size(); for (auto const &alias : arg_desc->aliases) { argument_index_map.insert(std::make_pair(alias, idx)); } kernel_args.emplace_back(kernel_arg); } } /// Parses a command line void ProblemSpace::parse_(KernelArgument *arg, CommandLine const &cmdline) { switch (arg->description->type) { case ArgumentTypeID::kScalar: { auto * scalar = static_cast<ScalarArgument *>(arg); for (auto const &alias : arg->description->aliases) { if (cmdline.check_cmd_line_flag(alias.c_str())) { std::vector<std::vector<std::string>> tokens; cmdline.get_cmd_line_argument_ranges(alias.c_str(), tokens); for (auto const & vec : tokens) { if (!vec.empty()) { scalar->values.push_back(vec.front()); } } break; } } } break; case ArgumentTypeID::kInteger: { auto *integer = static_cast<IntegerArgument *>(arg); for (auto const &alias : arg->description->aliases) { if (cmdline.check_cmd_line_flag(alias.c_str())) { std::vector<std::vector<std::string> > tokens; cmdline.get_cmd_line_argument_ranges(alias.c_str(), tokens); for (auto &range_tokens : tokens) { if (!range_tokens.empty()) { Range range; if (range_tokens.front() == "rand") { range.mode = Range::Mode::kRandom; } else if (range_tokens.front() == "randlg2") { range.mode = Range::Mode::kRandomLog2; } switch (range.mode) { case Range::Mode::kSequence: { range.first = lexical_cast<int64_t>(range_tokens.front()); if (range_tokens.size() > 1) { range.last = lexical_cast<int64_t>(range_tokens.at(1)); } else { range.last = range.first; } if (range_tokens.size() > 2) { range.increment = lexical_cast<int64_t>(range_tokens.at(2)); } 
else { range.increment = 1; } } break; case Range::Mode::kRandom: // fall-through case Range::Mode::kRandomLog2: { if (range_tokens.size() < 4) { throw std::runtime_error( "Range of mode 'rand' must have four tokens showing " "the minimum, maximum, and number of iterations. For example, " "rand:16:128:1000"); } range.minimum = lexical_cast<int64_t>(range_tokens.at(1)); range.maximum = lexical_cast<int64_t>(range_tokens.at(2)); range.first = 1; range.last = lexical_cast<int64_t>(range_tokens.at(3)); range.increment = 1; if (range_tokens.size() > 4) { range.divisible = lexical_cast<int64_t>(range_tokens.at(4)); } } break; default: throw std::runtime_error("Unsupported range mode."); break; } integer->ranges.push_back(range); } } break; } } } break; case ArgumentTypeID::kTensor: { auto *tensor = static_cast<TensorArgument *>(arg); for (auto const &alias : arg->description->aliases) { if (cmdline.check_cmd_line_flag(alias.c_str())) { std::vector<std::vector<std::string>> tokens; cmdline.get_cmd_line_argument_ranges(alias.c_str(), tokens); for (auto const & tensor_tokens : tokens) { if (!tensor_tokens.empty()) { TensorArgument::TensorDescription tensor_desc; tensor_desc.element = cutlass::library::from_string<library::NumericTypeID>(tensor_tokens.front()); // Layout if (tensor_tokens.size() > 1) { tensor_desc.layout = cutlass::library::from_string<library::LayoutTypeID>(tensor_tokens.at(1)); } // Stride for (size_t i = 2; i < tensor_tokens.size(); ++i) { tensor_desc.stride.push_back(lexical_cast<int>(tensor_tokens.at(i))); } tensor->values.push_back(tensor_desc); } } break; } } } break; case ArgumentTypeID::kStructure: { throw std::runtime_error("Structure arguments not supported"); } break; case ArgumentTypeID::kEnumerated: { auto *enumerated_type = static_cast<EnumeratedTypeArgument *>(arg); for (auto const &alias : arg->description->aliases) { if (cmdline.check_cmd_line_flag(alias.c_str())) { std::vector<std::string> tokens; cmdline.get_cmd_line_arguments(alias.c_str(), tokens); for (auto const & token : tokens) { enumerated_type->values.push_back(token); } break; } } } break; default: break; } } ///////////////////////////////////////////////////////////////////////////////////////////////// ProblemSpace::Iterator ProblemSpace::begin() const { return ProblemSpace::Iterator(*this); } ProblemSpace::Iterator ProblemSpace::end() const { ProblemSpace::Iterator it(*this); it.move_to_end(); return it; } /// Gets all argument names as an ordered vector std::vector<std::string> ProblemSpace::argument_names() const { Problem problem = this->begin().at(); std::vector<std::string> names; names.reserve(problem.size()); for (auto const & arg : problem) { names.push_back(arg->argument->description->aliases.front()); } return names; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_int(int64_t &int_value, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kInteger) { int_value = static_cast<IntegerArgument::IntegerValue const *>(value_ptr)->value; } else if (value_ptr->argument->description->type == ArgumentTypeID::kScalar) { std::stringstream ss; ss << static_cast<ScalarArgument::ScalarValue const *>(value_ptr)->value; ss >> int_value; } else { throw std::runtime_error( "arg_as_int64_t() - illegal cast. 
Problem space argument must be integer or scalar"); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_int(int &int_value, KernelArgument::Value const *value_ptr) { int64_t value64; bool obtained = arg_as_int(value64, value_ptr); if (obtained) { int_value = int(value64); return true; } return false; } /// Lexically casts an argument to an int bool arg_as_int( int &int_value, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_int(int_value, value_ptr); } /// Lexically casts an argument to an int64 bool arg_as_int( int64_t &int_value, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_int(int_value, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_NumericTypeID( library::NumericTypeID &numeric_type, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { numeric_type = library::from_string<library::NumericTypeID>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (numeric_type == library::NumericTypeID::kInvalid) { throw std::runtime_error( "arg_as_NumericTypeID() - illegal cast."); } } else { throw std::runtime_error( "arg_as_NumericTypeID() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_NumericTypeID( library::NumericTypeID &numeric_type, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_NumericTypeID(numeric_type, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_RasterOrder( library::RasterOrder &raster_order, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { raster_order = library::from_string<library::RasterOrder>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (raster_order == library::RasterOrder::kInvalid) { throw std::runtime_error( "arg_as_RasterOrder() - illegal cast."); } } else { throw std::runtime_error( "arg_as_RasterOrder() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. 
bool arg_as_RasterOrder( library::RasterOrder &raster_order, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_RasterOrder(raster_order, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_LayoutTypeID( library::LayoutTypeID &layout_type, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { layout_type = library::from_string<library::LayoutTypeID>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (layout_type == library::LayoutTypeID::kInvalid) { throw std::runtime_error( "arg_as_LayoutTypeID() - illegal cast."); } } else { throw std::runtime_error( "arg_as_LayoutTypeID() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_LayoutTypeID( library::LayoutTypeID &layout_type, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_LayoutTypeID(layout_type, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_OpcodeClassID( library::OpcodeClassID &opcode_class, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { opcode_class = library::from_string<library::OpcodeClassID>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (opcode_class == library::OpcodeClassID::kInvalid) { throw std::runtime_error( "arg_as_OpcodeClassID() - illegal cast."); } } else { throw std::runtime_error( "arg_as_OpcodeClassID() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_OpcodeClassID( library::OpcodeClassID &opcode_class, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_OpcodeClassID(opcode_class, value_ptr); } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_SplitKModeID( library::SplitKMode &split_k_mode, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { split_k_mode = library::from_string<library::SplitKMode>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (split_k_mode == library::SplitKMode::kInvalid) { throw std::runtime_error( "arg_as_SplitKModeID() - illegal cast."); } } else { throw std::runtime_error( "arg_as_SplitKModeID() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. 
bool arg_as_SplitKModeID( library::SplitKMode &split_k_mode, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_SplitKModeID(split_k_mode, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_ConvModeID( library::ConvModeID &conv_mode, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { conv_mode = library::from_string<library::ConvModeID>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (conv_mode == library::ConvModeID::kInvalid) { throw std::runtime_error( "arg_as_ConvModeID() - illegal cast."); } } else { throw std::runtime_error( "arg_as_ConvModeID() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_ConvModeID( library::ConvModeID &conv_mode, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_ConvModeID(conv_mode, value_ptr); } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_ProviderID( library::Provider &provider, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { provider = library::from_string<library::Provider>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (provider == library::Provider::kInvalid) { throw std::runtime_error( "arg_as_ProviderID() - illegal cast."); } } else { throw std::runtime_error( "arg_as_ProviderID() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_ProviderID( library::Provider &provider, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_ProviderID(provider, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to a given type stored in a byte array. Returns true if not null. bool arg_as_scalar( std::vector<uint8_t> &bytes, library::NumericTypeID numeric_type, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kInteger) { int64_t int_value = static_cast<IntegerArgument::IntegerValue const *>(value_ptr)->value; // TODO - convert int64_t => destination type } else if (value_ptr->argument->description->type == ArgumentTypeID::kScalar) { std::string const &str_value = static_cast<ScalarArgument::ScalarValue const *>(value_ptr)->value; return lexical_cast(bytes, numeric_type, str_value); } else { throw std::runtime_error( "arg_as_int() - illegal cast. 
Problem space argument must be integer or scalar"); } return true; } return false; } /// Lexically casts an argument to a given type and returns a byte array bool arg_as_scalar( std::vector<uint8_t> &bytes, library::NumericTypeID numeric_type, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_scalar(bytes, numeric_type, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns true if a tensor description satisfies a `tensor` value bool tensor_description_satisfies( library::TensorDescription const &tensor_desc, TensorArgument::TensorValue const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->desc.element != library::NumericTypeID::kUnknown && value_ptr->desc.element != tensor_desc.element) { return false; } if (value_ptr->desc.layout != library::LayoutTypeID::kUnknown && value_ptr->desc.layout != tensor_desc.layout) { return false; } } return true; } /// Returns true if a tensor description satisfies a `tensor` value bool tensor_description_satisfies( library::TensorDescription const &tensor_desc, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); if (value_ptr->argument->description->type == ArgumentTypeID::kTensor) { return tensor_description_satisfies( tensor_desc, static_cast<TensorArgument::TensorValue const *>(value_ptr)); } else { throw std::runtime_error("Kernel argument mismatch"); } return false; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns true if conv_kind satisfies the value bool conv_kind_satisfies( library::ConvKind const &conv_kind, EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr) { if (value_ptr->not_null) { library::ConvKind conv_kind_cmd_line = library::from_string<library::ConvKind>(value_ptr->element); if (conv_kind_cmd_line != library::ConvKind::kUnknown && conv_kind_cmd_line != conv_kind) { return false; } } return true; } /// Returns true if conv_kind satisfies the value bool conv_kind_satisfies( library::ConvKind const &conv_kind, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { return conv_kind_satisfies( conv_kind, static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)); } else { throw std::runtime_error("Kernel argument mismatch"); } return false; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns true if a iterator algorithm satisfies the value bool iterator_algorithm_satisfies( library::IteratorAlgorithmID const &iterator_algorithm, EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr) { if (value_ptr->not_null) { library::IteratorAlgorithmID iterator_algorithm_cmd_line = library::from_string<library::IteratorAlgorithmID>(value_ptr->element); if (iterator_algorithm_cmd_line != library::IteratorAlgorithmID::kNone && iterator_algorithm_cmd_line != iterator_algorithm) { return false; } } return true; } /// Returns true if a iterator algorithm satisfies the value bool 
iterator_algorithm_satisfies( library::IteratorAlgorithmID const &iterator_algorithm, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { return iterator_algorithm_satisfies( iterator_algorithm, static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)); } else { throw std::runtime_error("Kernel argument mismatch"); } return false; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
tools/profiler/src/problem_space.cpp/0
{ "file_path": "tools/profiler/src/problem_space.cpp", "repo_id": "tools", "token_count": 13631 }
62
/****************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ #pragma once /** * \file * \brief cuda kernels for padding in device memory with NHWC layout. 
*/ #include "cutlass/cutlass.h" #include "cutlass/layout/tensor.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_coord.h" #include "cutlass/tensor_ref.h" namespace cutlass { /** \brief interface for padding in a device memory tensor with NHWC layout * \tparam T: data type */ template <typename T> void nhwc_padding(cutlass::Tensor4DCoord input_tensor_size, cutlass::Tensor4DCoord output_tensor_size, TensorRef<T, layout::TensorNHWC> ref_input, TensorRef<T, layout::TensorNHWC> ref_output, cudaStream_t stream); template <typename T> __global__ void nhwc_padding_kernel(const int32_t n, const int32_t h, const int32_t w, const int32_t c_in, const int32_t c_out, const T zero, const T *input, T *output){ const int32_t idx_jump = blockDim.x * gridDim.x; const int32_t total_elements = n * h * w * c_out; int32_t c_idx, w_idx, h_idx, n_idx, resudial; T value; for (int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < total_elements; idx += idx_jump) { c_idx = idx%c_out; if (c_idx >= c_in){ value = zero; } else{ resudial = idx/c_out; w_idx = resudial%w; resudial = resudial/w; h_idx = resudial%h; n_idx = resudial/h; resudial = ((n_idx * h + h_idx) * w + w_idx) * c_in + c_idx; value = input[resudial]; } output[idx] = value; } } // fast kernel for c_in = 3 & c_out = 4 template <typename Tio, typename Telement, int element_in_Tio> __global__ void nhwc_padding_channel_3To4_kernel(const int32_t n, const int32_t h, const int32_t w, const Tio *input, Tio *output, const int32_t max_output_element, const int32_t max_input_element, const Tio zero_io, const Telement zero_element){ __shared__ Tio shm[192]; const int tidx = blockIdx.x * 192 + threadIdx.x; const int threadidx = threadIdx.x; shm[threadIdx.x] = tidx >= max_input_element ? zero_io : input[tidx]; __syncthreads(); const int output_offset = blockIdx.x * 256; const int lower_bound = max_output_element < output_offset + 256 ? max_output_element : output_offset + 256; for (int i = output_offset + threadidx, j = threadidx ; i < lower_bound ; i+=192, j+=192) { const Telement* shm_element = (const Telement*)shm + j*3*element_in_Tio/4; Telement array[element_in_Tio]; CUTLASS_PRAGMA_UNROLL for (int k = 0 ; k < element_in_Tio ; k++) array[k] = ((k+1)%4 == 0) ? zero_element : shm_element[(k > 3) ? (k - 1) : k]; output[i] = *((const Tio *)array); } } // fast kernel for c_in = 3 & c_out = 8 template <typename Tio, typename Telement, int element_in_Tio> __global__ void nhwc_padding_channel_3To8_kernel(const int32_t n, const int32_t h, const int32_t w, const Tio *input, Tio *output, const int32_t max_output_element, const int32_t max_input_element, const Tio zero_io, const Telement zero_element){ __shared__ Tio shm[192]; const int tidx = blockIdx.x * 192 + threadIdx.x; const int threadidx = threadIdx.x; shm[threadIdx.x] = tidx >= max_input_element ? zero_io : input[tidx]; __syncthreads(); const int output_offset = blockIdx.x * 512; const int lower_bound = max_output_element < output_offset + 512 ? max_output_element : output_offset + 512; for (int i = output_offset + threadidx, j = threadidx ; i < lower_bound ; i+=192, j+=192) { const Telement* shm_element = (const Telement*)shm + (element_in_Tio == 4 ? j/2 : j)*3; Telement array[element_in_Tio]; //float if (element_in_Tio == 4){ CUTLASS_PRAGMA_UNROLL for (int k = 0 ; k < element_in_Tio ; k++) array[k] = ((j % 2) == 1) ? zero_element : ((k >= 3) ? zero_element : shm_element[k]); } //half else{ CUTLASS_PRAGMA_UNROLL for (int k = 0 ; k < element_in_Tio ; k++) array[k] = (k >= 3) ? 
zero_element : shm_element[k]; } output[i] = *((const Tio *)array); } } template <typename T> void nhwc_padding(cutlass::Tensor4DCoord input_tensor_size, cutlass::Tensor4DCoord output_tensor_size, TensorRef<T, layout::TensorNHWC> ref_input, TensorRef<T, layout::TensorNHWC> ref_output, cudaStream_t stream){ assert( input_tensor_size.n() == output_tensor_size.n() && input_tensor_size.h() == output_tensor_size.h() && input_tensor_size.w() == output_tensor_size.w() && input_tensor_size.c() <= output_tensor_size.c()); int n = input_tensor_size.n(); int h = input_tensor_size.h(); int w = input_tensor_size.w(); int c_in = input_tensor_size.c(); int c_out = output_tensor_size.c(); //case 1 : channel == 3 padding to 4 or 8 if ((c_out == 4 || c_out == 8) && c_in == 3 && (n*h*w % 8 == 0)){ dim3 block(192); const int nhw = n*h*w; const int nhwc = nhw*c_in; //for half_t if (cutlass::sizeof_bits<T>::value == 16){ const int element_in_Tio = 8; const int max_input_element = nhwc/element_in_Tio; const int max_output_element = nhw*c_out/element_in_Tio; const int4 zero_io = {0, 0, 0, 0}; const half_t zero_element = static_cast<half_t>(0.0f); dim3 grid((nhwc + 192*element_in_Tio - 1)/(192*element_in_Tio)); if (c_out == 4){ nhwc_padding_channel_3To4_kernel<int4, half_t, element_in_Tio><<<grid, block, 0, stream>>> (n, h, w, (const int4 *)ref_input.data(), (int4 *)ref_output.data(), max_output_element, max_input_element, zero_io, zero_element); } else if (c_out == 8){ nhwc_padding_channel_3To8_kernel<int4, half_t, element_in_Tio><<<grid, block, 0, stream>>> (n, h, w, (const int4 *)ref_input.data(), (int4 *)ref_output.data(), max_output_element, max_input_element, zero_io, zero_element); } } //for float else{ const int element_in_Tio = 4; const int max_input_element = nhwc/element_in_Tio; const int max_output_element = nhw*c_out/element_in_Tio; const float4 zero_io = {0.0f, 0.0f, 0.0f, 0.0f}; const float zero_element = 0.0f; dim3 grid((nhwc + 192*element_in_Tio - 1)/(192*element_in_Tio)); if (c_out == 4){ nhwc_padding_channel_3To4_kernel<float4, float, element_in_Tio><<<grid, block, 0, stream>>> (n, h, w, (const float4 *)ref_input.data(), (float4 *)ref_output.data(), max_output_element, max_input_element, zero_io, zero_element); } else if (c_out == 8){ nhwc_padding_channel_3To8_kernel<float4, float, element_in_Tio><<<grid, block, 0, stream>>> (n, h, w, (const float4 *)ref_input.data(), (float4 *)ref_output.data(), max_output_element, max_input_element, zero_io, zero_element); } } } //case 2 : even channel else if ((c_out % 2) == 0 && (c_in % 2) == 0){ int32_t total_elements = n * h * w * c_out / 2; int block_size = 256; dim3 grid((total_elements + 255)/256); dim3 block(block_size); //for half_t if (cutlass::sizeof_bits<T>::value == 16){ const __half2 zero = {0.0f, 0.0f}; nhwc_padding_kernel<<<grid, block, 0, stream>>>(n, h, w, c_in/2, c_out/2, zero, (const __half2*)ref_input.data(), (__half2*)ref_output.data()); } //for float else{ const float2 zero = {0.0f, 0.0f}; nhwc_padding_kernel<<<grid, block, 0, stream>>>(n, h, w, c_in/2, c_out/2, zero, (const float2*)ref_input.data(), (float2*)ref_output.data()); } } //case 3 : odd channel else{ int32_t total_elements = n * h * w * c_out; int block_size = 256; dim3 grid((total_elements + 255)/256); dim3 block(block_size); const T zero = static_cast<T>(0.0f); nhwc_padding_kernel<<<grid, block, 0, stream>>>(n, h, w, c_in, c_out, zero, ref_input.data(), ref_output.data()); } } } //namespace cutlass
tools/util/include/cutlass/util/device_nhwc_padding.h/0
{ "file_path": "tools/util/include/cutlass/util/device_nhwc_padding.h", "repo_id": "tools", "token_count": 5398 }
63
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Reference implementation for GEMM in host-side code. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" namespace cutlass { namespace reference { namespace detail { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Template function to compute an inner product. 
#pragma hd_warning_disable // Suppresses warnings when attempting to instantiate with a // host-only type template <typename Atype, typename Btype, typename Ctype> CUTLASS_HOST_DEVICE Ctype inner_product(Atype a, Btype b, Ctype c) { return Ctype(a) * Ctype(b) + c; } /// Specialization for matrix multiplication with binary operands template <> CUTLASS_HOST_DEVICE int inner_product<Array<bin1_t, 32>, Array<bin1_t, 32>, int>( Array<bin1_t, 32> a, Array<bin1_t, 32> b, int c) { int accum = 0; for (int bit = 0; bit < 32; bit++) { accum += a[bit] ^ b[bit]; } return accum + c; } /* /// Specialization for matrix multiplication with signed 4-bit integer operands template <> CUTLASS_HOST_DEVICE int inner_product<Array<int4b_t, 8>, Array<int4b_t, 8>, int>( Array<int4b_t, 8> a, Array<int4b_t, 8> b, int c) { int accum = 0; for (int k = 0; k < 8; k++) { accum += a[k] * b[k]; } return accum + c; } /// Specialization for matrix multiplication with unsigned 4-bit integer operands template <> CUTLASS_HOST_DEVICE int inner_product<Array<uint4b_t, 8>, Array<uint4b_t, 8>, int>( Array<uint4b_t, 8> a, Array<uint4b_t, 8> b, int c) { int accum = 0; for (int k = 0; k < 8; k++) { accum += a[k] * b[k]; } return accum + c; } */ //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename SrcType, typename DstType> struct Cast { // Default behavior: convert to the destination type #pragma hd_warning_disable // Suppresses warnings when attempting to instantiate complex<T> with a // host-only type CUTLASS_HOST_DEVICE static DstType apply(SrcType src) { return static_cast<DstType>(src); }; }; template <> struct Cast<float, int8_t> { CUTLASS_HOST_DEVICE static int8_t apply(float src) { // Clamp to the range of signed 8-bit integers. return static_cast<int8_t>(fmaxf(-128.f, fminf(127.f, src))); }; }; template <> struct Cast<float, uint8_t> { CUTLASS_HOST_DEVICE static uint8_t apply(float src) { // Clamp to the range of signed 8-bit integers. return static_cast<uint8_t>(fmaxf(0.f, fminf(255.f, src))); }; }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace detail } // namespace reference } // namespace cutlass
tools/util/include/cutlass/util/reference/detail/inner_product.h/0
{ "file_path": "tools/util/include/cutlass/util/reference/detail/inner_product.h", "repo_id": "tools", "token_count": 1468 }
64
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Reference implementation for GEMM in host-side code. */ #pragma once #include "cutlass/coord.h" #include "cutlass/tensor_view.h" #include "cutlass/gemm/gemm.h" namespace cutlass { namespace reference { namespace device { namespace thread { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Thread-level blocked general matrix product. // // Note, this is a reference implementation. Performance is not expected to approach peak. 
// template < typename TensorRefA, typename TensorRefB, typename TensorRefC, typename ScalarType, typename AccumulatorType, typename OutputTile, typename InnerProductOp = multiply_add<AccumulatorType>, typename ConvertOp = NumericConverter<typename TensorRefC::Element, ScalarType> > struct Gemm { using ElementA = typename TensorRefA::Element; using ElementB = typename TensorRefB::Element; using ElementC = typename TensorRefC::Element; // // Data members // /// Tile for A operand ElementA A_tile[OutputTile::kColumn]; /// Tile for B operand ElementB B_tile[OutputTile::kRow]; /// Tile for Accumulator AccumulatorType accum[OutputTile::kColumn][OutputTile::kRow]; // // Methods // /// Constructor CUTLASS_HOST_DEVICE Gemm(AccumulatorType initial_accum = AccumulatorType(0)) { // Clear fetch registers for (int i = 0; i < OutputTile::kColumn; ++i) { A_tile[i] = ElementA(0); } for (int j = 0; j < OutputTile::kRow; ++j) { B_tile[j] = ElementB(0); } // Clear accumulators CUTLASS_PRAGMA_UNROLL for (int j = 0; j < OutputTile::kColumn; ++j) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < OutputTile::kRow; ++i) { accum[j][i] = initial_accum; } } } /// Computes a matrix product CUTLASS_HOST_DEVICE Gemm & multiply_add( gemm::GemmCoord problem_size, TensorRefA tensor_a, TensorRefB tensor_b, MatrixCoord output_coord = MatrixCoord()) { InnerProductOp inner_product_op; // Loop over the GEMM K dimension CUTLASS_PRAGMA_NO_UNROLL for (int k = 0; k < problem_size.k(); ++k) { // Fetch a slice of the A matrix CUTLASS_PRAGMA_UNROLL for (int i = 0; i < OutputTile::kColumn; ++i) { if (output_coord.row() + i < problem_size.m()) { A_tile[i] = tensor_a.at(make_Coord(output_coord.row() + i, k)); } } // Fetch a slice of the B matrix CUTLASS_PRAGMA_UNROLL for (int j = 0; j < OutputTile::kRow; ++j) { if (output_coord.column() + j < problem_size.n()) { B_tile[j] = tensor_b.at(make_Coord(k, output_coord.column() + j)); } } // Compute an accumulated matrix product CUTLASS_PRAGMA_UNROLL for (int j = 0; j < OutputTile::kRow; ++j) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < OutputTile::kColumn; ++i) { accum[j][i] = inner_product_op(A_tile[i], B_tile[j], accum[j][i]); } } } return *this; } /// Performs linear scaling of matrix product and updates output tensor CUTLASS_HOST_DEVICE Gemm & epilogue( gemm::GemmCoord problem_size, ScalarType alpha, ScalarType beta, TensorRefC tensor_c, TensorRefC tensor_d, MatrixCoord output_coord = MatrixCoord()) { ConvertOp convert_op; // Update the output tensor for (int j = 0; j < OutputTile::kRow; ++j) { for (int i = 0; i < OutputTile::kColumn; ++i) { MatrixCoord coord = output_coord + MatrixCoord(i, j); if (coord.row() < problem_size.m() && coord.column() < problem_size.n()) { tensor_d.at(coord) = convert_op( alpha * ScalarType(accum[j][i]) + beta * ScalarType(tensor_c.at(coord)) ); } } } return *this; } }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace device } // namespace reference } // namespace cutlass
tools/util/include/cutlass/util/reference/device/thread/gemm.h/0
{ "file_path": "tools/util/include/cutlass/util/reference/device/thread/gemm.h", "repo_id": "tools", "token_count": 2101 }
65
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. if(CUDA_COMPILER MATCHES "[Cc]lang") set(CUTLASS_NATIVE_CUDA_INIT ON) elseif(CMAKE_VERSION VERSION_LESS 3.12.4) set(CUTLASS_NATIVE_CUDA_INIT OFF) else() set(CUTLASS_NATIVE_CUDA_INIT ON) endif() set(CUTLASS_NATIVE_CUDA ${CUTLASS_NATIVE_CUDA_INIT} CACHE BOOL "Utilize the CMake native CUDA flow") if(NOT DEFINED ENV{CUDACXX} AND NOT DEFINED ENV{CUDA_BIN_PATH} AND DEFINED ENV{CUDA_PATH}) # For backward compatibility, allow use of CUDA_PATH. set(ENV{CUDACXX} $ENV{CUDA_PATH}/bin/nvcc) endif() if(CUTLASS_NATIVE_CUDA) enable_language(CUDA) if(NOT CUDA_VERSION) set(CUDA_VERSION ${CMAKE_CUDA_COMPILER_VERSION}) endif() if(NOT CUDA_TOOLKIT_ROOT_DIR) get_filename_component(CUDA_TOOLKIT_ROOT_DIR "${CMAKE_CUDA_COMPILER}/../.." ABSOLUTE) endif() else() find_package(CUDA REQUIRED) # We workaround missing variables with the native flow by also finding the CUDA toolkit the old way. if(NOT CMAKE_CUDA_COMPILER_VERSION) set(CMAKE_CUDA_COMPILER_VERSION ${CUDA_VERSION}) endif() endif() if (CUDA_VERSION VERSION_LESS 9.2) message(FATAL_ERROR "CUDA 9.2+ Required, Found ${CUDA_VERSION}.") endif() if(NOT CUTLASS_NATIVE_CUDA OR CUDA_COMPILER MATCHES "[Cc]lang") set(CMAKE_CUDA_COMPILER ${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc) message(STATUS "CUDA Compiler: ${CMAKE_CUDA_COMPILER}") endif() find_library( CUDART_LIBRARY cudart PATHS ${CUDA_TOOLKIT_ROOT_DIR} PATH_SUFFIXES lib/x86_64-linux-gnu lib/x64 lib64 lib NO_DEFAULT_PATH # We aren't going to search any system paths. We want to find the runtime # in the CUDA toolkit we're building against. ) if(NOT TARGET cudart AND CUDART_LIBRARY) message(STATUS "CUDART: ${CUDART_LIBRARY}") if(WIN32) add_library(cudart STATIC IMPORTED GLOBAL) # Even though we're linking against a .dll, in Windows you statically link against # the .lib file found under lib/x64. The .dll will be loaded at runtime automatically # from the PATH search. 
else() add_library(cudart SHARED IMPORTED GLOBAL) endif() add_library(nvidia::cudart ALIAS cudart) set_property( TARGET cudart PROPERTY IMPORTED_LOCATION ${CUDART_LIBRARY} ) elseif(TARGET cudart) message(STATUS "CUDART: Already Found") else() message(STATUS "CUDART: Not Found") endif() find_library( CUDA_DRIVER_LIBRARY cuda PATHS ${CUDA_TOOLKIT_ROOT_DIR} PATH_SUFFIXES lib/x86_64-linux-gnu lib/x64 lib64 lib lib64/stubs lib/stubs NO_DEFAULT_PATH # We aren't going to search any system paths. We want to find the runtime # in the CUDA toolkit we're building against. ) if(NOT TARGET cuda_driver AND CUDA_DRIVER_LIBRARY) message(STATUS "CUDA Driver: ${CUDA_DRIVER_LIBRARY}") if(WIN32) add_library(cuda_driver STATIC IMPORTED GLOBAL) # Even though we're linking against a .dll, in Windows you statically link against # the .lib file found under lib/x64. The .dll will be loaded at runtime automatically # from the PATH search. else() add_library(cuda_driver SHARED IMPORTED GLOBAL) endif() add_library(nvidia::cuda_driver ALIAS cuda_driver) set_property( TARGET cuda_driver PROPERTY IMPORTED_LOCATION ${CUDA_DRIVER_LIBRARY} ) elseif(TARGET cuda_driver) message(STATUS "CUDA Driver: Already Found") else() message(STATUS "CUDA Driver: Not Found") endif() find_library( NVRTC_LIBRARY nvrtc PATHS ${CUDA_TOOLKIT_ROOT_DIR} PATH_SUFFIXES lib/x64 lib64 lib NO_DEFAULT_PATH # We aren't going to search any system paths. We want to find the runtime # in the CUDA toolkit we're building against. ) if(NOT TARGET nvrtc AND NVRTC_LIBRARY) message(STATUS "NVRTC: ${NVRTC_LIBRARY}") if(WIN32) add_library(nvrtc STATIC IMPORTED GLOBAL) # Even though we're linking against a .dll, in Windows you statically link against # the .lib file found under lib/x64. The .dll will be loaded at runtime automatically # from the PATH search. else() add_library(nvrtc SHARED IMPORTED GLOBAL) endif() add_library(nvidia::nvrtc ALIAS nvrtc) set_property( TARGET nvrtc PROPERTY IMPORTED_LOCATION ${NVRTC_LIBRARY} ) elseif(TARGET nvrtc) message(STATUS "NVRTC: Already Found") else() message(STATUS "NVRTC: Not Found") endif() include_directories(SYSTEM ${CUDA_INCLUDE_DIRS}) # Some platforms (e.g. Visual Studio) don't add the CUDA include directories to the system include # paths by default, so we add it explicitly here. 
function(cutlass_correct_source_file_language_property) if(CUDA_COMPILER MATCHES "[Cc]lang") foreach(File ${ARGN}) if(File MATCHES ".*\.cu$") set_source_files_properties(${File} PROPERTIES LANGUAGE CXX) endif() endforeach() endif() endfunction() if (MSVC OR CUTLASS_LIBRARY_KERNELS MATCHES "all") set(CUTLASS_UNITY_BUILD_ENABLED_INIT ON) else() set(CUTLASS_UNITY_BUILD_ENABLED_INIT OFF) endif() set(CUTLASS_UNITY_BUILD_ENABLED ${CUTLASS_UNITY_BUILD_ENABLED_INIT} CACHE BOOL "Enable combined source compilation") if (MSVC) set(CUTLASS_UNITY_BUILD_BATCH_SIZE_INIT 8) else() set(CUTLASS_UNITY_BUILD_BATCH_SIZE_INIT 16) endif() set(CUTLASS_UNITY_BUILD_BATCH_SIZE ${CUTLASS_UNITY_BUILD_BATCH_SIZE_INIT} CACHE STRING "Batch size for unified source files") function(cutlass_unify_source_files TARGET_ARGS_VAR) set(options) set(oneValueArgs BATCH_SOURCES BATCH_SIZE) set(multiValueArgs) cmake_parse_arguments(_ "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) if (NOT DEFINED TARGET_ARGS_VAR) message(FATAL_ERROR "TARGET_ARGS_VAR parameter is required") endif() if (NOT DEFINED __BATCH_SOURCES) set(__BATCH_SOURCES ON) endif() if (__BATCH_SOURCES AND NOT DEFINED __BATCH_SIZE) set(__BATCH_SIZE ${CUTLASS_UNITY_BUILD_BATCH_SIZE}) endif() if (CUTLASS_UNITY_BUILD_ENABLED AND __BATCH_SOURCES AND __BATCH_SIZE GREATER 1) set(CUDA_FILE_ARGS) set(TARGET_SOURCE_ARGS) foreach(ARG ${__UNPARSED_ARGUMENTS}) if(${ARG} MATCHES ".*\.cu$") list(APPEND CUDA_FILE_ARGS ${ARG}) else() list(APPEND TARGET_SOURCE_ARGS ${ARG}) endif() endforeach() list(LENGTH CUDA_FILE_ARGS NUM_CUDA_FILE_ARGS) while(NUM_CUDA_FILE_ARGS GREATER 0) list(SUBLIST CUDA_FILE_ARGS 0 ${__BATCH_SIZE} CUDA_FILE_BATCH) string(SHA256 CUDA_FILE_BATCH_HASH "${CUDA_FILE_BATCH}") string(SUBSTRING ${CUDA_FILE_BATCH_HASH} 0 12 CUDA_FILE_BATCH_HASH) set(BATCH_FILE ${CMAKE_CURRENT_BINARY_DIR}/${NAME}.unity.${CUDA_FILE_BATCH_HASH}.cu) message(STATUS "Generating ${BATCH_FILE}") file(WRITE ${BATCH_FILE} "// Unity File - Auto Generated!\n") foreach(CUDA_FILE ${CUDA_FILE_BATCH}) get_filename_component(CUDA_FILE_ABS_PATH ${CUDA_FILE} ABSOLUTE) file(APPEND ${BATCH_FILE} "#include \"${CUDA_FILE_ABS_PATH}\"\n") endforeach() list(APPEND TARGET_SOURCE_ARGS ${BATCH_FILE}) if (NUM_CUDA_FILE_ARGS LESS_EQUAL __BATCH_SIZE) break() endif() list(SUBLIST CUDA_FILE_ARGS ${__BATCH_SIZE} -1 CUDA_FILE_ARGS) list(LENGTH CUDA_FILE_ARGS NUM_CUDA_FILE_ARGS) endwhile() else() set(TARGET_SOURCE_ARGS ${__UNPARSED_ARGUMENTS}) endif() set(${TARGET_ARGS_VAR} ${TARGET_SOURCE_ARGS} PARENT_SCOPE) endfunction() function(cutlass_add_library NAME) set(options SKIP_GENCODE_FLAGS) set(oneValueArgs EXPORT_NAME) set(multiValueArgs) cmake_parse_arguments(_ "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) cutlass_unify_source_files(TARGET_SOURCE_ARGS ${__UNPARSED_ARGUMENTS}) if(CUTLASS_NATIVE_CUDA OR CUDA_COMPILER MATCHES "clang") cutlass_correct_source_file_language_property(${TARGET_SOURCE_ARGS}) add_library(${NAME} ${TARGET_SOURCE_ARGS} "") else() set(CUDA_LINK_LIBRARIES_KEYWORD PRIVATE) cuda_add_library(${NAME} ${TARGET_SOURCE_ARGS} "") endif() cutlass_apply_standard_compile_options(${NAME}) if (NOT __SKIP_GENCODE_FLAGS) cutlass_apply_cuda_gencode_flags(${NAME}) endif() target_compile_features( ${NAME} INTERFACE cxx_std_11 ) get_target_property(TARGET_TYPE ${NAME} TYPE) if (TARGET_TYPE MATCHES "SHARED") set_target_properties(${NAME} PROPERTIES CUDA_RUNTIME_LIBRARY Shared) elseif(TARGET_TYPE MATCHES "STATIC") set_target_properties(${NAME} PROPERTIES CUDA_RUNTIME_LIBRARY Static) endif() 
if(__EXPORT_NAME) add_library(nvidia::cutlass::${__EXPORT_NAME} ALIAS ${NAME}) set_target_properties(${NAME} PROPERTIES EXPORT_NAME ${__EXPORT_NAME}) endif() endfunction() function(cutlass_add_executable NAME) set(options) set(oneValueArgs CUDA_RUNTIME_LIBRARY) set(multiValueArgs) cmake_parse_arguments(_ "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) if (NOT DEFINED __CUDA_RUNTIME_LIBRARY) set(__CUDA_RUNTIME_LIBRARY Shared) endif() set(__CUDA_RUNTIME_LIBRARY_ALLOWED None Shared Static) if (NOT __CUDA_RUNTIME_LIBRARY IN_LIST __CUDA_RUNTIME_LIBRARY_ALLOWED) message(FATAL_ERROR "CUDA_RUNTIME_LIBRARY value '${__CUDA_RUNTIME_LIBRARY}' is not in allowed list of '${__CUDA_RUNTIME_LIBRARY_ALLOWED}'") endif() cutlass_unify_source_files(TARGET_SOURCE_ARGS ${__UNPARSED_ARGUMENTS}) if(CUTLASS_NATIVE_CUDA OR CUDA_COMPILER MATCHES "clang") cutlass_correct_source_file_language_property(${TARGET_SOURCE_ARGS}) add_executable(${NAME} ${TARGET_SOURCE_ARGS}) else() set(CUDA_LINK_LIBRARIES_KEYWORD PRIVATE) cuda_add_executable(${NAME} ${TARGET_SOURCE_ARGS}) endif() cutlass_apply_standard_compile_options(${NAME}) cutlass_apply_cuda_gencode_flags(${NAME}) target_compile_features( ${NAME} INTERFACE cxx_std_11 ) set_target_properties(${NAME} PROPERTIES CUDA_RUNTIME_LIBRARY ${__CUDA_RUNTIME_LIBRARY}) endfunction() function(cutlass_target_sources NAME) set(options) set(oneValueArgs) set(multiValueArgs) cmake_parse_arguments(_ "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) cutlass_unify_source_files(TARGET_SOURCE_ARGS ${__UNPARSED_ARGUMENTS}) cutlass_correct_source_file_language_property(${TARGET_SOURCE_ARGS}) target_sources(${NAME} ${TARGET_SOURCE_ARGS}) endfunction()
CUDA.cmake/0
{ "file_path": "CUDA.cmake", "repo_id": "CUDA.cmake", "token_count": 4759 }
0
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Demonstrate CUTLASS debugging tool for dumping fragments and shared memory */ /////////////////////////////////////////////////////////////////////////////////////////////////// // Standard Library includes #include <iostream> // // CUTLASS includes // #include "cutlass/aligned_buffer.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h" #include "cutlass/util/debug.h" #include "cutlass/util/device_dump.h" #define EXAMPLE_MATRIX_ROW 64 #define EXAMPLE_MATRIX_COL 32 /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Element, typename GmemIterator, typename SmemIterator> __global__ void kernel_dump(typename GmemIterator::Params params, typename GmemIterator::TensorRef ref) { extern __shared__ Element shared_storage[]; // Construct the global iterator and load the data to the fragments. int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x; GmemIterator gmem_iterator(params, ref.data(), {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL}, tb_thread_id); typename GmemIterator::Fragment frag; frag.clear(); gmem_iterator.load(frag); // Call dump_fragment() with different parameters. 
if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nAll threads dump all the elements:\n"); cutlass::debug::dump_fragment(frag); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps all the elements:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps first 16 elements:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 16); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps first 16 elements with a stride of 8:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 16, /*S = */ 8); // Construct the shared iterator and store the data to the shared memory. SmemIterator smem_iterator( typename SmemIterator::TensorRef( {shared_storage, SmemIterator::Layout::packed( {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL})}), tb_thread_id); smem_iterator.store(frag); // Call dump_shmem() with different parameters. if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nDump all the elements:\n"); cutlass::debug::dump_shmem(shared_storage, EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nDump all the elements with a stride of 8:\n"); cutlass::debug::dump_shmem( shared_storage, EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL, /*S = */ 8); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point for dump_reg_shmem example. // // usage: // // 02_dump_reg_shmem // int main() { // Initialize a 64x32 column major matrix with sequential data (1,2,3...). using Element = cutlass::half_t; using Layout = cutlass::layout::ColumnMajor; cutlass::HostTensor<Element, Layout> matrix( {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL}); cutlass::reference::host::BlockFillSequential(matrix.host_data(), matrix.capacity()); // Dump the matrix. std::cout << "Matrix:\n" << matrix.host_view() << "\n"; // Copy the matrix to the device. matrix.sync_device(); // Define a global iterator, a shared iterator and their thread map. using ThreadMap = cutlass::transform::PitchLinearWarpRakedThreadMap< cutlass::layout::PitchLinearShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, 32, cutlass::layout::PitchLinearShape<8, 4>, 8>; using GmemIterator = cutlass::transform::threadblock::PredicatedTileIterator< cutlass::MatrixShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, Element, Layout, 1, ThreadMap>; typename GmemIterator::Params params(matrix.layout()); using SmemIterator = cutlass::transform::threadblock::RegularTileIterator< cutlass::MatrixShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, Element, cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<16, 64>, 1, ThreadMap>; dim3 grid(1, 1); dim3 block(32, 1, 1); int smem_size = int(sizeof(Element) * EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL); kernel_dump<Element, GmemIterator, SmemIterator> <<<grid, block, smem_size, 0>>>(params, matrix.device_ref()); cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cout << "Failed" << std::endl; } return (result == cudaSuccess ? 0 : -1); } ///////////////////////////////////////////////////////////////////////////////////////////////////
examples/02_dump_reg_shmem/dump_reg_shmem.cu/0
{ "file_path": "examples/02_dump_reg_shmem/dump_reg_shmem.cu", "repo_id": "examples", "token_count": 2448 }
1
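The dump utilities used in the example above are not tied to tile iterators: the fragments it prints are cutlass::Array-backed register arrays, so any such fragment can be printed with the same calls. The following is a minimal, hypothetical sketch (not part of the example) that fills a small half-precision fragment per thread and dumps it; the kernel name, fragment size and launch shape are arbitrary illustrative choices, and it assumes cutlass::debug::dump_fragment accepts any cutlass::Array fragment, as the example's usage suggests.

#include <cuda_runtime.h>

#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/util/device_dump.h"

// Hypothetical kernel: each thread fills an 8-element half-precision fragment
// with recognizable values, then only the first thread prints its contents.
__global__ void dump_fragment_sketch() {
  cutlass::Array<cutlass::half_t, 8> frag;

  for (int i = 0; i < 8; ++i) {
    frag[i] = cutlass::half_t(float(threadIdx.x * 8 + i));
  }

  // Same call pattern as the example: restrict output to the first thread (N = 1)
  // and the first 8 elements (M = 8), default stride.
  cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 8);
}

int main() {
  dump_fragment_sketch<<<1, 32>>>();
  return (cudaDeviceSynchronize() == cudaSuccess) ? 0 : -1;
}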
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to run matrix multiplication kernels using functions and data structures provided by CUTLASS using Tensor Cores, which we run on an NVIDIA Turing GPU. Writing a single high-performance matrix multiplication kernel is hard but doable, whereas writing high-performance kernels at scale that work for multiple problem sizes with good abstractions is really hard. CUTLASS solves this problem by providing simplified abstractions to compose the multiple sections of a GEMM kernel. When used properly, the kernels can easily hit peak performance of the GPU. CUTLASS divides a kernel into hierarchical composable sections, which means that at each of the thread, warp and thread-block levels, they compute on their own tile size, with higher-level tile sizes being composed from lower-level ones. Multiple thread-tiles (the tile size each thread computes) can be used to form warp-tiles (the tile size each warp computes), and multiple warp-tiles can be used to compute a threadblock-tile (the tile size computed by a threadblock). In this example, we split variable initialization into 1. Setting up data properties: describes how matrices are laid out in memory and how the kernel can view them (logical to physical mapping) 2. Setting up computation properties: describes how the above matrices will be used to compute the output of the matrix multiplication. First, we set up the data types of matrices A, B, C and D along with alpha and beta, as the equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leave the rest of the computation to the end of the kernel, as alpha * X + beta * C is a simple element-wise operation on X (= A * B) and C. We call this the epilogue of the kernel.
Hence, we set up the data types for alpha and beta to be equal to ElementComputeEpilogue = int32_t. As we want to use MMA instructions on Turing, and they support 8-bit signed integers (int8_t), we use int8_t as the data type for the elements of input matrices A and B. Turing Tensor Core operations accumulate the partial dot products in int32_t, which can store a wider range of numbers, so we use it as the data type of the output matrix elements and of the accumulation. We convey this to the CUTLASS kernel by initializing the template variables ElementAccumulator (int32_t), ElementComputeEpilogue (int32_t), ElementInputA (int8_t), ElementInputB (int8_t), ElementOutput (int32_t). Communicating just the data type is not enough. As the data is laid out linearly in memory, we also have to convey the layout of the matrices. We do that by initializing the template variable LayoutInputA to row major, LayoutInputB to column major and LayoutOutput to row major, matching the cutlass layout types used below. Next, we set up rules to compute alpha * X + beta * C, which is called the epilogue of the kernel. We initialize the template variable EpilogueOp, which takes the data type of the output ElementOutput (int32_t), the number of elements per vectorized memory access (128 / sizeof_bits<ElementOutput> = 4 for int32_t), the data type of the accumulator (int32_t) and the data type of the linear combination computation (alpha * X + beta * C). Now that we have set up the properties of the data, we have to set up the properties of the computation. Second, we create template variables for the tile sizes of the thread-block, warp and mma-op: 128x256x64, 64x64x64 and 8x8x16 (MxNxK) respectively. When these are passed to instantiate the CUTLASS GEMM kernel, it internally deduces the number of threads needed per thread-block, the amount of shared memory, how to store data in a bank-conflict-free manner, and a ton of other variables required to compose, initialize and launch a high-performance GEMM kernel. This is the beauty of CUTLASS: it relieves the developer from understanding and coding complicated hardware optimizations, which can easily go wrong. CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? MMA pipelines constitute the whole process of loading input data from global memory to shared memory, loading data from shared memory to registers, doing matrix multiplication, and storing the result to global memory. The flow sequence below shows a typical mma pipeline. matrix in global memory -> registers -> tile in shared memory -> registers -> mma -> registers -> output to global memory The problem with a single pipeline is that each stage is synchronous, which means each stage has to wait until the previous one has finished executing. There are stages in the pipeline which do not have a fixed latency, for example the loads from global memory and shared memory. Therefore, we can add one more pipeline with a phase shift in the mma kernel to hide the latency of the global and shared memory loads. Finally, the pipeline in a kernel looks like (1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5) mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) matrix in global memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers -> (9) output to global memory This way, you can hide the latency of the second global memory load by doing computation on already-loaded input data. There are a few more template variables initialized, such as which threadblock tile of the output matrix is computed by which threadblock launched on an SM, and the CUDA SM architecture of the GPU you want to run on.
These are all put together to create a template variable which describes the CUTLASS GEMM kernel using the cutlass::gemm::device::Gemm template. The next step is to initialize physical data, instantiate and initialize the CUTLASS kernel, and run it. We use CUTLASS utilities to initialize, fill and compare matrices as they are simple and don't get in the way of learning CUTLASS. Once all the matrices are initialized and filled with data, we create the arguments tuple used to launch the CUTLASS kernel, which takes the problem size (M = 5120, N = 4096 and K = 4096), the matrices, alpha, beta and, importantly, the split k-dimension factor. Along with that, we query CUTLASS whether the kernel we instantiated requires any scratch-space memory. If so, we allocate it and pass it along with the other arguments used to initialize the CUTLASS kernel; then the kernel is launched. In this example, we later launch a reference GEMM kernel (from the CUTLASS utilities) to check whether the output from the CUTLASS kernel is the same as that of the reference GEMM kernel. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = int32_t; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = int8_t; // <- data type of elements in input matrix A using ElementInputB = int8_t; // <- data type of elements in input matrix B using ElementOutput = int32_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. Row Major for // Matrix A, Column Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm75; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 256, 64>; // <- threadblock tile M = 128, N = 256, K = 64 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 64>; // <- warp tile M = 64, N = 64, K = 64 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 16>; // <- MMA Op tile M = 8, N = 8, K = 16 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- default identity swizzle maps threadblocks to output tiles // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements.
This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 2; using Gemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; int run() { const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { bool notSupported = false; // Turing Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 75)) { std::cerr << "Turing Tensor Core operations must be run on a machine with compute capability at least 75." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } return run(); }
examples/08_turing_tensorop_gemm/turing_tensorop_gemm.cu/0
{ "file_path": "examples/08_turing_tensorop_gemm/turing_tensorop_gemm.cu", "repo_id": "examples", "token_count": 6260 }
2
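A common follow-on to the example above is writing the result back as int8_t instead of int32_t. The accumulator stays int32_t, but the epilogue then has to convert and clamp to the narrower output type. The sketch below shows one plausible way to adjust only the output type and epilogue using cutlass::epilogue::thread::LinearCombinationClamp; the tile shapes mirror the example, while the clamped output type and float alpha/beta are assumptions rather than a tested drop-in configuration. The rest of the host code would stay the same apart from the element type of tensors C and D and the float-valued alpha/beta.

#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"

// Assumed variant of the example: int8_t output, float alpha/beta, values
// clamped to the representable range of int8_t in the epilogue.
using ElementOutputClamped      = int8_t;
using ElementAccumulatorClamped = int32_t;
using ElementComputeClamped     = float;

using EpilogueOpClamped = cutlass::epilogue::thread::LinearCombinationClamp<
    ElementOutputClamped,
    128 / cutlass::sizeof_bits<ElementOutputClamped>::value,  // 16 int8_t elements per 128-bit access
    ElementAccumulatorClamped,
    ElementComputeClamped>;

using GemmInt8Output = cutlass::gemm::device::Gemm<
    int8_t, cutlass::layout::RowMajor,                // A
    int8_t, cutlass::layout::ColumnMajor,             // B
    ElementOutputClamped, cutlass::layout::RowMajor,  // C / D
    ElementAccumulatorClamped,
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm75,
    cutlass::gemm::GemmShape<128, 256, 64>,  // threadblock tile, as in the example
    cutlass::gemm::GemmShape<64, 64, 64>,    // warp tile, as in the example
    cutlass::gemm::GemmShape<8, 8, 16>,      // MMA op shape, as in the example
    EpilogueOpClamped>;                      // remaining parameters left at their defaults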
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/gemm/device/default_gemm_configuration.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "kernel/b2b_gemm.h" #include "kernel/default_b2b_gemm.h" #include "kernel/default_b2b_gemm_smem_accumulator.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Element type for internal accumulation typename ElementAccumulator_ = ElementC_, /// Operator class tag typename OperatorClass_ = arch::OpClassSimt, /// Tag indicating architecture to tune for typename ArchTag_ = arch::Sm70, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape0_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::ThreadblockShape, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape1_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape0_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::WarpShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape1_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::InstructionShape, /// Epilogue output operator typename EpilogueOutputOp0_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::EpilogueOutputOp, /// Epilogue output operator typename EpilogueOutputOp1_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>, /// Number of stages used in the pipelined mainloop int Stages = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kStages, /// Stage accumulator in shared memory bool SmemAccumulator = false, /// Access granularity of A matrix in units of elements int AlignmentA = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentA, /// Access granularity of B matrix in units of elements int AlignmentB = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentB, /// Operation performed by GEMM typename Operator_ = typename DefaultGemmConfiguration< 
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::Operator> class B2bGemm { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using TensorRefA = TensorRef<ElementA const, LayoutA>; using ElementB = ElementB_; using LayoutB = LayoutB_; using TensorRefB = TensorRef<ElementB const, LayoutB>; using ElementC = ElementC_; using LayoutC = LayoutC_; using TensorRefC = TensorRef<ElementC const, LayoutC>; using TensorRefD = TensorRef<ElementC, LayoutC>; using ElementAccumulator = ElementAccumulator_; using OperatorClass = OperatorClass_; using ArchTag = ArchTag_; using ThreadblockShape0 = ThreadblockShape0_; using ThreadblockShape1 = ThreadblockShape1_; using WarpShape0 = WarpShape0_; using WarpShape1 = WarpShape1_; using InstructionShape = InstructionShape_; using EpilogueOutputOp0 = EpilogueOutputOp0_; using EpilogueOutputOp1 = EpilogueOutputOp1_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentB = AlignmentB; static int const kAlignmentC = EpilogueOutputOp1::kCount; static ComplexTransform const kTransformA = ComplexTransform::kNone; static ComplexTransform const kTransformB = ComplexTransform::kNone; /// Derived types using ElementScaleBias = typename EpilogueOutputOp0::ElementCompute; using LayoutScaleBias = layout::RowMajor; /// Define the kernel using B2bGemmKernel = typename kernel::DefaultB2bGemm< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, LayoutC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape0, ThreadblockShape1, WarpShape0, WarpShape1, InstructionShape, EpilogueOutputOp0, EpilogueOutputOp1, ThreadblockSwizzle, kStages, Operator, SmemAccumulator >::B2bGemmKernel; using Arguments = typename B2bGemmKernel::Arguments; private: /// Kernel parameters object typename B2bGemmKernel::Params params_; public: /// Constructs the GEMM. B2bGemm() { } /// Determines whether the GEMM can execute the given problem. static Status can_implement(Arguments const &args) { Status status = B2bGemmKernel::can_implement( args.problem_size_0, args.problem_size_1, args.ref_A0.non_const_ref(), args.ref_B0.non_const_ref(), args.ref_C0.non_const_ref(), args.ref_B1.non_const_ref(), args.ref_C1.non_const_ref(), args.ref_D1 ); if (status != Status::kSuccess) { return status; } return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { size_t bytes = 0; // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape( args.problem_size_0, {ThreadblockShape0::kM, ThreadblockShape0::kN, ThreadblockShape0::kK}, args.batch_count); return bytes; } /// Initializes GEMM state from arguments. 
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape( args.problem_size_0, {ThreadblockShape0::kM, ThreadblockShape0::kN, ThreadblockShape0::kK}, args.batch_count); // cutlass::gemm::GemmCoord grid_shape_1 = threadblock_swizzle.get_tiled_shape( // args.problem_size_1, // {ThreadblockShape1::kM, ThreadblockShape1::kN, ThreadblockShape1::kK}, // args.batch_count); // Initialize the Params structure params_ = typename B2bGemmKernel::Params{ args.mode, args.problem_size_0, args.problem_size_1, grid_shape, args.ref_A0.non_const_ref(), args.ref_B0.non_const_ref(), args.ref_C0.non_const_ref(), args.ref_Scale0.non_const_ref(), args.ref_Bias0.non_const_ref(), args.ref_B1.non_const_ref(), args.ref_C1.non_const_ref(), args.ref_D1, args.batch_stride_A0, args.batch_stride_B0, args.batch_stride_B1, args.batch_stride_C1, args.batch_stride_D1, args.batch_stride_Bias0, args.batch_stride_Scale0, args.epilogue0, args.epilogue1, static_cast<int *>(workspace), }; return Status::kSuccess; } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { params_.ref_A0.reset(args.ref_A0.non_const_ref().data()); params_.ref_B0.reset(args.ref_B0.non_const_ref().data()); params_.ref_C0.reset(args.ref_C0.non_const_ref().data()); params_.ref_Scale0.reset(args.ref_Scale0.non_const_ref().data()); params_.ref_Bias0.reset(args.ref_Bias0.non_const_ref().data()); params_.ref_B1.reset(args.ref_B1.non_const_ref().data()); params_.ref_C1.reset(args.ref_C1.non_const_ref().data()); params_.ref_D1.reset(args.ref_D1.data()); params_.output_op_0 = args.epilogue0; params_.output_op_1 = args.epilogue1; params_.semaphore = static_cast<int *>(workspace); return Status::kSuccess; } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(B2bGemmKernel::kThreadCount, 1, 1); cudaError_t result; int smem_size = int(sizeof(typename B2bGemmKernel::SharedStorage)); if (smem_size >= (48 << 10)) { result = cudaFuncSetAttribute(Kernel<B2bGemmKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } cutlass::Kernel<B2bGemmKernel><<<grid, block, smem_size, stream>>>(params_); result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; } // namespace device } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
examples/13_two_tensor_op_fusion/device/b2b_gemm.h/0
{ "file_path": "examples/13_two_tensor_op_fusion/device/b2b_gemm.h", "repo_id": "examples", "token_count": 4594 }
3
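To make the long template parameter list above concrete, here is a hypothetical instantiation of the device-level B2bGemm for two back-to-back half-precision GEMMs on SM75. The element types, tile shapes and epilogues are illustrative assumptions only; the actual example picks them so that each threadblock's GEMM-0 output can be consumed directly as the GEMM-1 A operand. The host-side flow in the trailing comment simply follows the can_implement / initialize / run members defined in the class above.

#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/linear_combination_relu.h"
#include "device/b2b_gemm.h"  // the header shown above

// Illustrative type choices (assumptions, not the example's exact configuration).
using ElementAB  = cutlass::half_t;
using ElementCD  = cutlass::half_t;
using ElementAcc = float;

using EpilogueOp0 = cutlass::epilogue::thread::LinearCombinationRelu<
    ElementCD, 128 / cutlass::sizeof_bits<ElementCD>::value, ElementAcc, ElementAcc>;
using EpilogueOp1 = cutlass::epilogue::thread::LinearCombinationRelu<
    ElementCD, 128 / cutlass::sizeof_bits<ElementCD>::value, ElementAcc, ElementAcc>;

using B2bGemmF16Sm75 = cutlass::gemm::device::B2bGemm<
    ElementAB, cutlass::layout::RowMajor,     // A0
    ElementAB, cutlass::layout::ColumnMajor,  // B0 and B1
    ElementCD, cutlass::layout::RowMajor,     // C and D
    ElementAcc,
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm75,
    cutlass::gemm::GemmShape<64, 64, 32>,    // threadblock tile, GEMM 0
    cutlass::gemm::GemmShape<64, 128, 32>,   // threadblock tile, GEMM 1
    cutlass::gemm::GemmShape<32, 64, 32>,    // warp tile, GEMM 0
    cutlass::gemm::GemmShape<32, 128, 32>,   // warp tile, GEMM 1
    cutlass::gemm::GemmShape<16, 8, 8>,      // Turing f16 Tensor Core shape
    EpilogueOp0,
    EpilogueOp1>;

// Host-side flow, using the members defined in the class above:
//
//   B2bGemmF16Sm75 b2b_op;
//   typename B2bGemmF16Sm75::Arguments args{ /* problem sizes, tensor refs,
//     scale/bias refs and epilogue parameters -- see kernel::B2bGemm::Arguments */ };
//   if (B2bGemmF16Sm75::can_implement(args) == cutlass::Status::kSuccess) {
//     b2b_op.initialize(args, /* workspace = */ nullptr);
//     b2b_op();  // operator() launches the fused kernel
//   }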
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a multistage threadblock-scoped Implicit GEMM Convolution kernel. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/cache_operation.h" #include "cutlass/gemm/threadblock/mma_base.h" #include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h" #include "threadblock/b2b_mma_base.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape0_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA0_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA0_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA0, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB0_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB0_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB0, /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape1_, /// Iterates over the intermediate accumulator tile // (concept::MmaTensorOpFragmentIterator) typename FragmentIteratorA1_, /// Iterates over vectors of scale and bias vector in global memory // (concept: VectorIterator) typename IteratorAccumulatorScaleBias_, /// WarpIterator to load Scale or Bias vector from threadblock fragment typename FragmentIteratorA1ScaleBias_, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB1_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB1_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB1, /// Output operator for 1st Gemm(concept: epilogue::thread::LinearCombinationClamp, etc...) typename OutputOp_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy0_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy1_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class B2bImplicitGemmMultistage : public gemm::threadblock::B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages> { public: ///< Base class using Base = gemm::threadblock::B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages>; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape0 = Shape0_; ///< Iterates over tiles of A operand in global memory using IteratorA0 = IteratorA0_; ///< Iterates over tiles of B operand in global memory using IteratorB0 = IteratorB0_; ///< Policy describing tuning details using Policy0 = Policy0_; using SmemIteratorA0 = SmemIteratorA0_; using SmemIteratorB0 = SmemIteratorB0_; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape1 = Shape1_; ///< Iterates over tiles of A operand in global memory using FragmentIteratorA1 = FragmentIteratorA1_; ///< Iterates over tiles of the scale and bias vectors in global memory using IteratorAccumulatorScaleBias = IteratorAccumulatorScaleBias_; ///< WarpIterator to load Scale or Bias vector from threadblock fragment using FragmentIteratorA1ScaleBias = FragmentIteratorA1ScaleBias_; ///< Iterates over tiles of B operand in global memory using IteratorB1 = IteratorB1_; ///< Policy describing tuning details using Policy1 = Policy1_; using SmemIteratorB1 = SmemIteratorB1_; ///< Epilogue after 1st Gemm using OutputOp = OutputOp_; static const bool PerChannelScale = (OutputOp::kScale == epilogue::thread::ScaleType::OnlyAlphaPerChannelScaling); static cutlass::arch::CacheOperation::Kind const 
kCacheOpA0 = CacheOpA0; static cutlass::arch::CacheOperation::Kind const kCacheOpB0 = CacheOpB0; static cutlass::arch::CacheOperation::Kind const kCacheOpB1 = CacheOpB1; // // Dependent types // using ElementC = typename Policy0::Operator::ElementC; /// Fragment of accumulator tile using FragmentC0 = typename Policy0::Operator::FragmentC; /// Warp-level Mma using Operator0 = typename Policy0::Operator; /// Fragment of Scale and Bias loaded from global memory using FragmentA1ScaleBias = typename IteratorAccumulatorScaleBias::Fragment; /// Fragment of accumulator tile using FragmentC1 = typename Policy1::Operator::FragmentC; /// Warp-level Mma using Operator1 = typename Policy1::Operator; /// Internal structure exposed for introspection. struct Detail { static_assert(Base::kWarpGemmIterations0 > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); static_assert(Base::kWarpGemmIterations1 > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); /// Number of cp.async instructions to load one stage of operand A static int const AsyncCopyIterationsPerStageA0 = IteratorA0::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB0 = IteratorB0::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB1 = IteratorB1::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load on group of operand A static int const kAccessesPerGroupA0 = (AsyncCopyIterationsPerStageA0 + Base::kWarpGemmIterations0 - 1) / Base::kWarpGemmIterations0; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB0 = (AsyncCopyIterationsPerStageB0 + Base::kWarpGemmIterations0 - 1) / Base::kWarpGemmIterations0; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB1 = (AsyncCopyIterationsPerStageB1 + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1; }; private: using WarpLoadedFragmentA0 = typename Operator0::FragmentA; using WarpLoadedFragmentB0 = typename Operator0::FragmentB; /// Warp Fragment of operand A1 loaded from accmulator tile using WarpLoadedFragmentA1 = typename FragmentIteratorA1::Fragment; using WarpLoadedFragmentA1ScaleBias = typename FragmentIteratorA1ScaleBias::Fragment; using WarpLoadedFragmentB1 = typename Operator1::FragmentB; using WarpTransformedFragmentA0 = typename Operator0::TransformedFragmentA; using WarpTransformedFragmentB0 = typename Operator0::TransformedFragmentB; using WarpTransformedFragmentA1 = typename Operator1::TransformedFragmentA; using WarpTransformedFragmentB1 = typename Operator1::TransformedFragmentB; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA0 smem_iterator_A0_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB0 smem_iterator_B0_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB1 smem_iterator_B1_; public: /// Construct from tensor references CUTLASS_DEVICE B2bImplicitGemmMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::B2bMmaSharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int 
lane_idx ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A0_(shared_storage.shared_storage0.operand_A_ref(), thread_idx), smem_iterator_B0_(shared_storage.shared_storage0.operand_B_ref(), thread_idx), smem_iterator_B1_(shared_storage.shared_storage1.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN); int warp_idx_k = warp_idx / (Base::WarpCount0::kM * Base::WarpCount0::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount0::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount0::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A0_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations0 * warp_idx_k}); this->warp_tile_iterator_B0_.add_tile_offset( {Base::kWarpGemmIterations0 * warp_idx_k, warp_idx_n}); this->warp_tile_iterator_B1_.add_tile_offset( {Base::kWarpGemmIterations1 * warp_idx_k, warp_idx_n}); } CUTLASS_DEVICE void copy_tiles_and_advance_0( IteratorA0 &iterator_A0, IteratorB0 &iterator_B0, int group_start_A0 = 0, int group_start_B0 = 0) { iterator_A0.set_iteration_index(group_start_A0); this->smem_iterator_A0_.set_iteration_index(group_start_A0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA0; ++j) { if (group_start_A0 + j < Detail::AsyncCopyIterationsPerStageA0) { typename IteratorA0::AccessType *dst_ptr = reinterpret_cast<typename IteratorA0::AccessType *>( this->smem_iterator_A0_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA0::Element>::value * IteratorA0::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA0>( dst_ptr, iterator_A0.get(), iterator_A0.valid()); ++iterator_A0; ++this->smem_iterator_A0_; } } iterator_B0.set_iteration_index(group_start_B0); this->smem_iterator_B0_.set_iteration_index(group_start_B0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB0; ++j) { if (group_start_B0 + j < Detail::AsyncCopyIterationsPerStageB0) { typename IteratorB0::AccessType *dst_ptr = reinterpret_cast<typename IteratorB0::AccessType *>( this->smem_iterator_B0_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB0::Element>::value * IteratorB0::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB0>( dst_ptr, iterator_B0.get(), iterator_B0.valid()); ++iterator_B0; ++this->smem_iterator_B0_; } } } CUTLASS_DEVICE void copy_tiles_and_advance_1( IteratorB1 &iterator_B1, int group_start_B1 = 0) { iterator_B1.set_iteration_index(group_start_B1); this->smem_iterator_B1_.set_iteration_index(group_start_B1); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB1; ++j) { if (group_start_B1 + j < Detail::AsyncCopyIterationsPerStageB1) { typename IteratorB1::AccessType *dst_ptr = reinterpret_cast<typename IteratorB1::AccessType *>( this->smem_iterator_B1_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value * IteratorB1::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>( dst_ptr, iterator_B1.get(), iterator_B1.valid()); ++iterator_B1; ++this->smem_iterator_B1_; } } } /// Perform a threadblock-scoped 
matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations_0, ///< destination accumulator tile FragmentC1 &accum, ///< iterator over A0 operand in global memory IteratorA0 iterator_A0, ///< iterator over B0 operand in global memory IteratorB0 iterator_B0, ///< iterator over A1 operand scale vector in global memory IteratorAccumulatorScaleBias iterator_A1_scale, ///< iterator over A1 operand bias vector in global memory IteratorAccumulatorScaleBias iterator_A1_bias, ///< iterator over B1 operand in global memory IteratorB1 iterator_B1, ///< initial value of accumulator FragmentC0 const &src_accum, ///< epilogue operation after 1st Gemm OutputOp output_op_0, ///< Imaginary strides used for planar-complex only - ignored here int64_t imag_stride_A = 0, int64_t imag_stride_B = 0) { // // Prologue // // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations_0) { iterator_A0.set_iteration_index(0); this->smem_iterator_A0_.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA0; ++j) { typename IteratorA0::AccessType *dst_ptr = reinterpret_cast<typename IteratorA0::AccessType *>( this->smem_iterator_A0_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA0::Element>::value * IteratorA0::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA0>( dst_ptr, iterator_A0.get(), iterator_A0.valid()); ++iterator_A0; ++this->smem_iterator_A0_; } iterator_B0.set_iteration_index(0); this->smem_iterator_B0_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB0; ++j) { typename IteratorB0::AccessType *dst_ptr = reinterpret_cast<typename IteratorB0::AccessType *>( this->smem_iterator_B0_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB0::Element>::value * IteratorB0::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB0>( dst_ptr, iterator_B0.get(), iterator_B0.valid()); ++iterator_B0; ++this->smem_iterator_B0_; } // Move to the next stage iterator_A0.advance(); iterator_B0.advance(); this->smem_iterator_A0_.add_tile_offset({0, 1}); this->smem_iterator_B0_.add_tile_offset({1, 0}); // Inserts a fence to group cp.async instructions into stages. cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand FragmentC0 accum0 = src_accum; // Waits until kStages-2 stages have committed. 
cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA0 warp_loaded_frag_A0[2]; WarpLoadedFragmentB0 warp_loaded_frag_B0[2]; WarpTransformedFragmentA0 warp_transformed_frag_A0[2]; WarpTransformedFragmentB0 warp_transformed_frag_B0[2]; Operator0 warp_mma0; this->warp_tile_iterator_A0_.set_kgroup_index(0); this->warp_tile_iterator_B0_.set_kgroup_index(0); this->warp_tile_iterator_A0_.load(warp_loaded_frag_A0[0]); this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[0]); ++this->warp_tile_iterator_A0_; ++this->warp_tile_iterator_B0_; // Start issuing the first group of the next stage outside of the mainloop copy_tiles_and_advance_0(iterator_A0, iterator_B0); int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma0.transform(warp_transformed_frag_A0[0], warp_transformed_frag_B0[0], warp_loaded_frag_A0[0], warp_loaded_frag_B0[0]); // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations_0 > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0); this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0); this->warp_tile_iterator_A0_.load(warp_loaded_frag_A0[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A0_; ++this->warp_tile_iterator_B0_; if (warp_mma_k > 0) warp_mma0.transform(warp_transformed_frag_A0[warp_mma_k % 2], warp_transformed_frag_B0[warp_mma_k % 2], warp_loaded_frag_A0[warp_mma_k % 2], warp_loaded_frag_B0[warp_mma_k % 2]); // Issue global->shared copies for the next stage int group_start_iteration_A0, group_start_iteration_B0; if (warp_mma_k + 1 == Base::kWarpGemmIterations0) { group_start_iteration_A0 = 0; group_start_iteration_B0 = 0; } else { group_start_iteration_A0 = (warp_mma_k + 1) * Detail::kAccessesPerGroupA0; group_start_iteration_B0 = (warp_mma_k + 1) * Detail::kAccessesPerGroupB0; } copy_tiles_and_advance_0(iterator_A0, iterator_B0, group_start_iteration_A0, group_start_iteration_B0); warp_mma0( accum0, warp_transformed_frag_A0[warp_mma_k % 2], warp_transformed_frag_B0[warp_mma_k % 2], accum0 ); if (warp_mma_k + 1 == Base::kWarpGemmIterations0) warp_mma0.transform(warp_transformed_frag_A0[(warp_mma_k + 1) % 2], warp_transformed_frag_B0[(warp_mma_k + 1) % 2], warp_loaded_frag_A0[(warp_mma_k + 1) % 2], warp_loaded_frag_B0[(warp_mma_k + 1) % 2]); if (warp_mma_k + 2 == Base::kWarpGemmIterations0) { // Inserts a fence to group cp.async instructions into stages. 
cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages of cp.async have committed arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A0.advance(); iterator_B0.advance(); this->smem_iterator_A0_.add_tile_offset({0, 1}); this->smem_iterator_B0_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A0_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B0_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A0_.add_tile_offset( {0, -Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0}); this->warp_tile_iterator_B0_.add_tile_offset( {-Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations_0; } } } // Insert fence and wait for all outstanding cp.async operations to commit. cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); // 2nd Implicit Gemm /// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile FragmentIteratorA1 warp_tile_iterator_A1_(accum0); FragmentA1ScaleBias tb_frag_A1_scale; FragmentA1ScaleBias tb_frag_A1_bias; FragmentIteratorA1ScaleBias warp_tile_iterator_A1_scale_(tb_frag_A1_scale); FragmentIteratorA1ScaleBias warp_tile_iterator_A1_bias_(tb_frag_A1_bias); if(PerChannelScale) { tb_frag_A1_scale.clear(); iterator_A1_scale.load(tb_frag_A1_scale); ++iterator_A1_scale; } tb_frag_A1_bias.clear(); iterator_A1_bias.load(tb_frag_A1_bias); ++iterator_A1_bias; // // Prologue // int gemm_k_iterations_1 = FragmentIteratorA1::Policy::kIterations / Base::kWarpGemmIterations1; // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations_1) { iterator_B1.set_iteration_index(0); this->smem_iterator_B1_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB1; ++j) { typename IteratorB1::AccessType *dst_ptr = reinterpret_cast<typename IteratorB1::AccessType *>( this->smem_iterator_B1_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value * IteratorB1::ThreadMap::kElementsPerAccess / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>( dst_ptr, iterator_B1.get(), iterator_B1.valid()); ++iterator_B1; ++this->smem_iterator_B1_; } // Move to the next stage iterator_B1.advance(); this->smem_iterator_B1_.add_tile_offset({1, 0}); // Inserts a fence to group cp.async instructions into stages. cutlass::arch::cp_async_fence(); } // Waits until kStages-2 stages have committed. 
cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA1 warp_loaded_frag_A1[2]; WarpLoadedFragmentA1ScaleBias warp_loaded_frag_A1_scale[2]; WarpLoadedFragmentA1ScaleBias warp_loaded_frag_A1_bias[2]; WarpLoadedFragmentB1 warp_loaded_frag_B1[2]; WarpTransformedFragmentA1 warp_transformed_frag_A1[2]; WarpTransformedFragmentB1 warp_transformed_frag_B1[2]; Operator1 warp_mma1; if(PerChannelScale) { warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[0]); ++warp_tile_iterator_A1_scale_; } warp_tile_iterator_A1_bias_.load(warp_loaded_frag_A1_bias[0]); ++warp_tile_iterator_A1_bias_; warp_tile_iterator_A1_.load(warp_loaded_frag_A1[0], warp_loaded_frag_A1_scale[0], warp_loaded_frag_A1_bias[0], output_op_0); ++warp_tile_iterator_A1_; this->warp_tile_iterator_B1_.set_kgroup_index(0); this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[0]); ++this->warp_tile_iterator_B1_; // Start issuing the first group of the next stage outside of the mainloop copy_tiles_and_advance_1(iterator_B1); smem_write_stage_idx = Base::kStages - 1; smem_read_stage_idx = 0; warp_mma1.transform(warp_transformed_frag_A1[0], warp_transformed_frag_B1[0], warp_loaded_frag_A1[0], warp_loaded_frag_B1[0]); // // Mainloop // CUTLASS_PRAGMA_UNROLL for (gemm_k_iterations_1 = FragmentIteratorA1::Policy::kIterations / Base::kWarpGemmIterations1 - (Base::kStages - 1); gemm_k_iterations_1 > (-Base::kStages + 1); gemm_k_iterations_1--) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) { // Load threadblock-level scale/bias vector from global memory if (warp_mma_k + 1 == Base::kWarpGemmIterations1) { if(PerChannelScale) { tb_frag_A1_scale.clear(); iterator_A1_scale.load(tb_frag_A1_scale); ++iterator_A1_scale; } tb_frag_A1_bias.clear(); iterator_A1_bias.load(tb_frag_A1_bias); ++iterator_A1_bias; } // Load warp-level scale bias fragment from threadblock scale/bias vector if(PerChannelScale) { warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]); ++warp_tile_iterator_A1_scale_; } warp_tile_iterator_A1_bias_.load(warp_loaded_frag_A1_bias[(warp_mma_k + 1) % 2]); ++warp_tile_iterator_A1_bias_; // Load warp-level tile from accumulator fragment warp_tile_iterator_A1_.load(warp_loaded_frag_A1[(warp_mma_k + 1) % 2], warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2], warp_loaded_frag_A1_bias[(warp_mma_k + 1) % 2], output_op_0); ++warp_tile_iterator_A1_; // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. 
this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1); this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_B1_; if (warp_mma_k > 0) warp_mma1.transform(warp_transformed_frag_A1[warp_mma_k % 2], warp_transformed_frag_B1[warp_mma_k % 2], warp_loaded_frag_A1[warp_mma_k % 2], warp_loaded_frag_B1[warp_mma_k % 2]); // Issue global->shared copies for the next stage int group_start_iteration_B1; if (warp_mma_k + 1 == Base::kWarpGemmIterations1) { group_start_iteration_B1 = 0; } else { group_start_iteration_B1 = (warp_mma_k + 1) * Detail::kAccessesPerGroupB1; } copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1); warp_mma1( accum, warp_transformed_frag_A1[warp_mma_k % 2], warp_transformed_frag_B1[warp_mma_k % 2], accum ); if (warp_mma_k + 1 == Base::kWarpGemmIterations1) warp_mma1.transform(warp_transformed_frag_A1[(warp_mma_k + 1) % 2], warp_transformed_frag_B1[(warp_mma_k + 1) % 2], warp_loaded_frag_A1[(warp_mma_k + 1) % 2], warp_loaded_frag_B1[(warp_mma_k + 1) % 2]); if (warp_mma_k + 2 == Base::kWarpGemmIterations1) { // Inserts a fence to group cp.async instructions into stages. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages of cp.async have committed arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_B1.advance(); this->smem_iterator_B1_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_B1_.add_tile_offset( {-Base::kStages * Policy1::kPartitionsK * Base::kWarpGemmIterations1, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } } } } // Insert fence and wait for all outstanding cp.async operations to commit. cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
examples/13_two_tensor_op_fusion/threadblock/b2b_implicit_gemm_multistage.h/0
{ "file_path": "examples/13_two_tensor_op_fusion/threadblock/b2b_implicit_gemm_multistage.h", "repo_id": "examples", "token_count": 13619 }
4
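The multistage mainloop in b2b_implicit_gemm_multistage.h above pipelines global-to-shared cp.async copies against warp-level MMAs: the prologue prefetches kStages - 1 k-tiles, the loop keeps running until the k counter reaches -(kStages - 1) so the final prefetched tiles are still consumed after the loads stop, and smem_write_stage_idx / smem_read_stage_idx walk a circular buffer of kStages shared-memory tiles. The Python sketch below only simulates that index bookkeeping as a host-side illustration; the function and argument names are invented for this sketch and are not CUTLASS symbols.

def simulate_multistage_pipeline(k_iterations, num_stages):
    """Yield (step, smem_write_stage, smem_read_stage) for a software-pipelined loop."""
    # Prologue: num_stages - 1 tiles are prefetched before any math is issued.
    remaining = k_iterations - (num_stages - 1)
    write_stage = num_stages - 1   # next shared-memory stage to be filled by cp.async
    read_stage = 0                 # stage currently consumed by the warp-level MMAs
    step = 0
    # Mirror of 'gemm_k_iterations > (-kStages + 1)': the loop outlives the loads so
    # the last prefetched stages are still multiplied.
    while remaining > -(num_stages - 1):
        yield step, write_stage, read_stage
        # Advance both indices through the circular buffer, wrapping at num_stages.
        write_stage = 0 if write_stage == num_stages - 1 else write_stage + 1
        read_stage = 0 if read_stage == num_stages - 1 else read_stage + 1
        remaining -= 1
        step += 1

if __name__ == "__main__":
    for step, w, r in simulate_multistage_pipeline(k_iterations=8, num_stages=3):
        print(f"k-tile {step}: writing stage {w}, reading stage {r}")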
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** Please check example 07, 08 and 17 for the basics of dense tensor op gemm kernels. NVIDIA Ampere architecture also supports structured sparse tensor op for tf32, fp16, int8 and int4. Sparse GEMM kernels needs to takes an additional E matrix which stores the meta data. The format of meta data is different for every data types. CUTLASS templates can automatically infer it based on input A and B. Check code below. Moreover, matrix E needs to be preprocessed so that it can use ldmatrix to load into the registers efficiently. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_sparse.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/host_reorder.h" #include "cutlass/util/host_uncompress.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = int32_t; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::int4b_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::int4b_t; // <- data type of elements in input matrix B using ElementOutput = int32_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. 
Row Major for // Matrix A, Column Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 256>; // <- threadblock tile M = 128, N = 128, K = 256 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 256>; // <- warp tile M = 64, N = 64, K = 256 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 128>; // <- MMA Op tile M = 16, N = 8, K = 128 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 3; using Gemm = cutlass::gemm::device::SparseGemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; // Data type and layout of meta data matrix E can be inferred from template Gemm. 
using ElementInputE = typename Gemm::ElementE; using LayoutInputE = cutlass::layout::RowMajor; using ReorderedLayoutInputE = typename Gemm::LayoutE; // Blow property is defined in include/cutlass/arch/sp_mma_sm80.h // 50% Sparsity on Ampere constexpr int kSparse = Gemm::kSparse; // How many elements of A are covered per ElementE constexpr int kElementsPerElementE = Gemm::kElementsPerElementE; // The size of individual meta data constexpr int kMetaSizeInBits = Gemm::kMetaSizeInBits; int run() { const int length_m = 512; const int length_n = 512; const int length_k = 1024; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse)); // <- Create matrix A with dimensions M x (K / 2) cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a_uncompressed( problem_size.mk()); // <- Create uncompressed matrix A with dimensions M x K for reference computing cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Create matrix E with dimensions M x (K / 2 / kElementsPerElementE). This one is used by reference computing. cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE)); // Same size as the above. The above one needs to be reordered and stored in this one. cutlass::HostTensor<ElementInputE, ReorderedLayoutInputE> tensor_e_reordered( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE)); // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(2), ElementInputA(-2), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(2), ElementInputB(-2), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(2), ElementOutput(-2), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomSparseMeta( tensor_e.host_view(), 1, kMetaSizeInBits); // <- Fill matrix E on host with uniform-distribution random meta data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Reorder the meta data matrix so that we can use ldmatrix to load them to tensor core // instructions. 
cutlass::reorder_meta(tensor_e_reordered.host_ref(), tensor_e.host_ref(), {problem_size.m(), problem_size.n(), problem_size.k() / kSparse / kElementsPerElementE}); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_e_reordered.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device tensor_e_reordered.device_ref(), // <- reference to matrix E on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // uncompress tensor_a based on meta data tensor_e. We need it for reference computing. cutlass::uncompress(tensor_a_uncompressed.host_ref(), tensor_a.host_ref(), tensor_e.host_ref(), problem_size.m(), problem_size.k()); // Create instantiation for host reference gemm kernel cutlass::reference::host::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue, typename Gemm::Operator> gemm_host; // Launch host reference gemm kernel gemm_host(problem_size, alpha, tensor_a_uncompressed.host_ref(), tensor_b.host_ref(), beta, tensor_c.host_ref(), tensor_ref_d.host_ref()); // Copy output data from CUTLASS host for comparison tensor_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { bool notSupported = false; // Ampere Sparse Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.1. // // CUTLASS must be compiled with CUDA 11.1 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.1 Toolkit or later." 
<< std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (props.major * 10 + props.minor < 80) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are a no-op. return 0; } return run(); }
examples/15_ampere_sparse_tensorop_gemm/ampere_sparse_tensorop_gemm.cu/0
{ "file_path": "examples/15_ampere_sparse_tensorop_gemm/ampere_sparse_tensorop_gemm.cu", "repo_id": "examples", "token_count": 5912 }
5
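In the sparse GEMM example above, operand A is stored compressed as M x (K / kSparse) and the metadata tensor E as M x (K / kSparse / kElementsPerElementE), with kSparse = 2 for Ampere's 50% structured sparsity and kElementsPerElementE inferred from the Gemm template. The Python sketch below merely recomputes those host-tensor extents for the 512 x 512 x 1024 problem; the value used for elements_per_element_e is a placeholder, since the real constant must be read from Gemm::kElementsPerElementE.

# Hypothetical sizing helper; the sparsity constants must come from the
# instantiated Gemm template (Gemm::kSparse, Gemm::kElementsPerElementE).
def sparse_gemm_extents(m, n, k, k_sparse, elements_per_element_e):
    return {
        "A (compressed)": (m, k // k_sparse),
        "B (dense)": (k, n),
        "E (metadata)": (m, k // k_sparse // elements_per_element_e),
        "C / D": (m, n),
    }

if __name__ == "__main__":
    # kSparse = 2 (50% sparsity); elements_per_element_e = 8 is only a placeholder.
    print(sparse_gemm_extents(512, 512, 1024, k_sparse=2, elements_per_element_e=8))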
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief CUTLASS Layernorm Example. This workload provides a layer normalization example using a one-pass, square-sum-based variance calculation. Specifically, we fuse the reduction operation to find local mean and local square sum mean in the epilogue of 1st GEMM. After a light full reduction kernel, the mean / variance values are readily calculated for element-wise operations which are fused into the 2nd GEMM. As stated in https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data, the square-sum based one-pass implementation may raise concerns on numerical stability issues. That being said, though this fully fused layernorm example almost perfectly hides all the memory cost to access the intermediate matrix for layernorm computation, the numerical issue might hinder a persuasive usage in real-world scenarios. 
If that is the case, a user may turn to the stand-alone CUTLASS layernorm example in tools/util/include/cutlass/util/device_layernorm.h. Examples: # Run a CUTLASS layernorm example with the default setup, # using the language of the transformer model as an example, (Column Major output matrix, hidden dimension = 768, valid word number = 4096, intermediate_scale = 4) $ ./examples/37_gemm_layernorm_gemm_fusion/37_gemm_layernorm_gemm_fusion # Run a CUTLASS layernorm example with hidden dimension = 512 $ ./examples/37_gemm_layernorm_gemm_fusion/37_gemm_layernorm_gemm_fusion --hidden_dim=512 */ #include <cmath> #include <iostream> #include <vector> #include <limits> #include "cutlass/cutlass.h" #include "cutlass/arch/memory.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/gemm/device/gemm_complex.h" #include "cutlass/epilogue/thread/scale_type.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/gemm_complex.h" #include "cutlass/util/reference/host/tensor_reduce.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/fast_math.h" ///////////////////////////////////////////////////////////////////////////////////////////////// #include "gemm_with_layernorm.h" ///////////////////////////////////////////////////////////////////////////////////////////////// enum class Disposition { kPassed, kIncorrect, kNotVerified }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing template<typename LayoutOutput_> struct Options { using LayoutOutput = LayoutOutput_; static bool const kIsColumnMajorOutput = cutlass::platform::is_same<LayoutOutput, cutlass::layout::ColumnMajor>::value; bool help; cutlass::gemm::GemmCoord problem_size0; cutlass::gemm::GemmCoord problem_size1; int hidden_dim; int valid_word_num; int intermediate_scale; int iterations; unsigned seed; float alpha; float beta; bool verification_enabled; double tolerance; Options(): help(false), iterations(20), seed(2022), hidden_dim(768), valid_word_num(4096), intermediate_scale(4), alpha(1), beta(0), verification_enabled(true), tolerance(0.01), problem_size1(problem_size0.m() * 4, problem_size0.n(), problem_size0.m()) { } bool valid() { return true; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("hidden_dim", hidden_dim, 768); cmd.get_cmd_line_argument("valid_word_num", valid_word_num, 4096); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("verify", verification_enabled); cmd.get_cmd_line_argument("seed", seed); cmd.get_cmd_line_argument("tolerance", tolerance); if (kIsColumnMajorOutput) { // column major output setup problem_size0.m() = hidden_dim; problem_size0.n() = valid_word_num; problem_size0.k() = hidden_dim; problem_size1.m() = hidden_dim * intermediate_scale; problem_size1.n() = valid_word_num; problem_size1.k() = hidden_dim; } else { // row major output setup problem_size0.m() = valid_word_num; problem_size0.n() = hidden_dim; problem_size0.k() = hidden_dim; problem_size1.m() = valid_word_num; problem_size1.n() =
hidden_dim * intermediate_scale; problem_size1.k() = hidden_dim; } } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "37_gemm_layernorm_gemm_fusion example\n\n" << " This example uses the CUTLASS Library to compute GEMM + Layernorm for arbitrary problem sizes.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --hidden_dim=<int> Hidden dimension\n" << " --valid_word_num=<int> Valid word number\n" << " --seed=<int> Random number seed (1*)\n\n" << " --iterations=<int> Number of profiling iterations to perform (0 to disable profiling).\n\n" << " --verify=<bool> If true, performs reference calculation.\n\n" << " --tolerance <float> Error tolerance\n" ; out << "\n\nExamples:\n\n" << "$ ./examples/37_gemm_layernorm_gemm_fusion/37_gemm_layernorm_gemm_fusion \\\n" << " --hidden_dim=768 --valid_word_num=1024 \n\n"; return out; } /// Returns true if the environment and Toolkit support this bool supported(bool verbose = true) const { // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { if (verbose) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; } return false; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { if (verbose) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; } return false; } if (!((props.major * 10 + props.minor) >= 80)) { if (verbose) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; } return false; } // // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 8 elements. // int const kAlignment = 8; if ((problem_size0.m() % kAlignment) || (problem_size0.n() % kAlignment) || (problem_size0.k() % kAlignment)) { if (verbose) { std::cerr << "Misaligned input in 1st GEMM." << std::endl; } // misaligned tensors for Gemm1 return false; } if ((problem_size1.m() % kAlignment) || (problem_size1.n() % kAlignment) || (problem_size1.k() % kAlignment)) { if (verbose) { std::cerr << "Misaligned input in 2nd GEMM." 
<< std::endl; } // misaligned tensors for Gemm2 return false; } return true; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template< typename LayoutOutput_> struct Testbed { // // Type definitions // // User-defined data types using ElementInputA0 = cutlass::half_t; using ElementInputB0 = cutlass::half_t; using ElementOutput = cutlass::half_t; using ElementCompute = cutlass::half_t; using LayoutInputA0 = cutlass::layout::RowMajor; using LayoutInputB0 = cutlass::layout::ColumnMajor; using LayoutOutput = LayoutOutput_; static bool const kIsColumnMajorOutput = cutlass::platform::is_same<LayoutOutput, cutlass::layout::ColumnMajor>::value; // turn of shifted K by default static bool const kIsShiftedVariance = false; /// Linear scaling operator using EpilogueFunctorOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementCompute, ElementCompute >; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; static int const kStages0 = 3; static int const kStages1 = 4; using GemmLayernorm = cutlass::GemmLayernorm< ElementInputA0, LayoutInputA0, ElementInputB0, LayoutInputB0, ElementOutput, LayoutOutput, ElementCompute, EpilogueFunctorOp, ThreadblockShape, WarpShape, InstructionShape, kStages0, kStages1, kIsShiftedVariance >; using ElementInputA1 = typename GemmLayernorm::ElementInputA1; using ElementOutputC1 = typename GemmLayernorm::ElementOutputC1; using ElementInputScaleBias = typename GemmLayernorm::ElementInputScaleBias; using ElementLayernormCompute = typename GemmLayernorm::ElementLayernormCompute; using LayoutInputA1 = typename GemmLayernorm::LayoutInputA1; using LayoutOutputC0 = typename GemmLayernorm::LayoutOutputC0; using LayoutOutputC1 = typename GemmLayernorm::LayoutOutputC1; using LayoutInputScaleBias = typename GemmLayernorm::LayoutInputScaleBias; // // Data members // Options<LayoutOutput> const &options; cutlass::HostTensor<ElementInputA0, LayoutInputA0> tensor_A0; cutlass::HostTensor<ElementInputB0, LayoutInputB0> tensor_B0; cutlass::HostTensor<ElementOutput, LayoutOutputC0> tensor_C0; cutlass::HostTensor<ElementInputA1, LayoutInputA1> tensor_A1; cutlass::HostTensor<ElementOutputC1, LayoutOutputC1> tensor_C1; cutlass::HostTensor<ElementOutput, LayoutOutputC0> reference_C0; cutlass::HostTensor<ElementOutputC1, LayoutOutputC1> reference_C1; cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias> tensor_Variance; cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias> tensor_Mean; cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias> tensor_Beta; cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias> tensor_Gamma; cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias> reference_Mean; cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias> reference_Variance; // shifted K tensor to better ensure the numerical stability // According to https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance // the closer shifted K to the actual mean, the better numerical stability we'll observe cutlass::HostTensor<ElementOutput, LayoutOutputC0> tensor_Shifted_K; // // Methods // Testbed( Options<LayoutOutput> const &options_ ): options(options_) { tensor_A0.reset({options.problem_size0.m(), options.problem_size0.k()}); tensor_B0.reset({options.problem_size0.k(), options.problem_size0.n()}); 
tensor_C0.reset({options.problem_size0.m(), options.problem_size0.n()}); tensor_A1.reset({options.problem_size1.m(), options.problem_size1.k()}); tensor_C1.reset({options.problem_size1.m(), options.problem_size1.n()}); reference_C0.reset({options.problem_size0.m(), options.problem_size0.n()}); reference_C1.reset({options.problem_size1.m(), options.problem_size1.n()}); int leading_dim_0 = kIsColumnMajorOutput ? options.problem_size0.n() : options.problem_size0.m(); int leading_dim_1 = kIsColumnMajorOutput ? options.problem_size0.m() : options.problem_size0.n(); int block_num = (leading_dim_1 + GemmLayernorm::ThreadblockShape::kM - 1) / GemmLayernorm::ThreadblockShape::kM; tensor_Variance.reset({block_num, leading_dim_0}); tensor_Mean.reset({block_num, leading_dim_0}); tensor_Shifted_K.reset({1, leading_dim_0}); tensor_Beta.reset({1, leading_dim_1}); tensor_Gamma.reset({1, leading_dim_1}); reference_Mean.reset({1, leading_dim_0}, false); reference_Variance.reset({1, leading_dim_0}, false); } /// Run Disposition run() { Disposition disposition = Disposition::kNotVerified; // // Initialize the workspace // initialize(); // // Launch device kernel // cutlass::Status status = cutlass::Status::kSuccess; status = execute_device_kernel(); if (status != cutlass::Status::kSuccess) { std::cerr << "Device execution failed." << std::endl; return disposition; } cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cerr << "Device synchronize failed with error " << cudaGetErrorString(result) << std::endl; return disposition; } // // Compute the reference // compute_reference(); // // Verify // if (options.verification_enabled) { bool passed = verify(); if (passed) { disposition = Disposition::kPassed; } else { disposition = Disposition::kIncorrect; } } // // Profiling // if (options.iterations) { profile(); } return disposition; } /// Random initialization void initialize() { cutlass::reference::host::TensorFillRandomUniform( tensor_A0.host_view(), options.seed, ElementInputA0(4), ElementInputA0(-4), 0 ); cutlass::reference::host::TensorFillRandomUniform( tensor_B0.host_view(), options.seed + 1, ElementInputB0(4), ElementInputB0(-4), 0 ); cutlass::reference::host::TensorFillRandomUniform( tensor_A1.host_view(), options.seed + 2, ElementInputA1(4), ElementInputA1(-4), 0 ); cutlass::reference::host::TensorFillRandomUniform( tensor_Beta.host_view(), options.seed + 3, ElementInputScaleBias(4), ElementInputScaleBias(-4), 0 ); cutlass::reference::host::TensorFillRandomUniform( tensor_Gamma.host_view(), options.seed + 4, ElementInputScaleBias(4), ElementInputScaleBias(-4), 0 ); cutlass::reference::host::TensorFillRandomUniform( tensor_Shifted_K.host_view(), options.seed + 5, ElementOutput(4), ElementOutput(-5), 0 ); tensor_A0.sync_device(); tensor_B0.sync_device(); tensor_A1.sync_device(); tensor_Beta.sync_device(); tensor_Gamma.sync_device(); } cutlass::Status execute_device_kernel() { cutlass::Status status = cutlass::Status::kSuccess; // // Setup arguments // typename GemmLayernorm::Arguments args( options.problem_size0, options.problem_size1, tensor_A0.device_ref().data(), tensor_B0.device_ref().data(), tensor_C0.device_ref().data(), tensor_C0.device_ref().data(), tensor_A1.device_ref().data(), tensor_C1.device_ref().data(), tensor_A0.device_ref().stride(0), tensor_B0.device_ref().stride(0), tensor_C0.device_ref().stride(0), tensor_C0.device_ref().stride(0), tensor_A1.device_ref().stride(0), tensor_C1.device_ref().stride(0), { ElementCompute(options.alpha), 
ElementCompute(options.beta) }, tensor_Variance.device_ref(), tensor_Mean.device_ref(), tensor_Gamma.device_ref(), tensor_Beta.device_ref(), tensor_Shifted_K.device_ref().data() ); // // Launch // GemmLayernorm gemm_layernorm; // Initialize status = gemm_layernorm.initialize(args); if (status != cutlass::Status::kSuccess) { return status; } // Run status = gemm_layernorm(); return status; } /// Reference calculation void compute_reference() { cutlass::reference::device::Gemm< ElementInputA0, LayoutInputA0, ElementInputB0, LayoutInputB0, ElementOutput, LayoutOutputC0, ElementCompute, ElementCompute > gemm_device0; cutlass::reference::device::Gemm< ElementInputA1, LayoutInputA1, ElementOutput, LayoutOutputC0, ElementOutputC1, LayoutOutputC1, ElementCompute, ElementCompute > gemm_device1; // Compute 1st GEMM gemm_device0( options.problem_size0, ElementCompute(options.alpha), tensor_A0.device_ref(), tensor_B0.device_ref(), ElementCompute(options.beta), tensor_C0.device_ref(), reference_C0.device_ref() ); reference_C0.sync_host(); tensor_Mean.sync_host(); tensor_Variance.sync_host(); tensor_Gamma.sync_host(); tensor_Beta.sync_host(); tensor_Shifted_K.sync_host(); // Compute the sum and square sum for verification purpose if (kIsColumnMajorOutput) { for (int n = 0; n < options.problem_size0.n(); ++n) { ElementLayernormCompute sum = ElementLayernormCompute(0); ElementLayernormCompute square_sum = ElementLayernormCompute(0); for (int m = 0; m < options.problem_size0.m(); ++m) { sum += ElementLayernormCompute(reference_C0.at({m, n})); square_sum += ElementLayernormCompute(reference_C0.at({m, n})) * ElementLayernormCompute(reference_C0.at({m, n})); } ElementLayernormCompute mean = sum / ElementLayernormCompute(options.problem_size0.m()); ElementLayernormCompute square_mean = square_sum / ElementLayernormCompute(options.problem_size0.m()); ElementLayernormCompute variance = cutlass::constants::one<ElementLayernormCompute>() / cutlass::fast_sqrt(square_mean - mean * mean + ElementLayernormCompute(1e-6) ) ; mean = -mean * variance; reference_Mean.at({0, n}) = ElementInputScaleBias(mean); reference_Variance.at({0, n}) = ElementInputScaleBias(variance); } }else{ for (int m = 0; m < options.problem_size0.m(); ++m) { ElementLayernormCompute sum = ElementLayernormCompute(0); ElementLayernormCompute square_sum = ElementLayernormCompute(0); for (int n = 0; n < options.problem_size0.n(); ++n) { sum += ElementLayernormCompute(reference_C0.at({m, n})) ; square_sum += ElementLayernormCompute(reference_C0.at({m, n})) * ElementLayernormCompute(reference_C0.at({m, n})) ; } ElementLayernormCompute mean = sum / ElementLayernormCompute(options.problem_size0.n()); ElementLayernormCompute square_mean = square_sum / ElementLayernormCompute(options.problem_size0.n()); ElementLayernormCompute variance = cutlass::constants::one<ElementLayernormCompute>() / cutlass::fast_sqrt(square_mean - mean * mean + ElementLayernormCompute(1e-6)) ; mean = -mean * variance; reference_Mean.at({0, m}) = ElementInputScaleBias(mean); reference_Variance.at({0, m}) = ElementInputScaleBias(variance); } } // Element-wise transform for OutputC0 using 1-pass layernorm algo if (kIsColumnMajorOutput) { for (int n = 0; n < options.problem_size0.n(); ++n) { ElementLayernormCompute sum = ElementLayernormCompute(0); for (int m = 0; m < options.problem_size0.m(); ++m) { sum += ElementLayernormCompute(reference_C0.at({m, n})) ; } ElementInputScaleBias mean = ElementInputScaleBias(sum / ElementLayernormCompute(options.problem_size0.m())); sum = 
ElementLayernormCompute(0); for (int m = 0; m < options.problem_size0.m(); ++m) { sum += ElementLayernormCompute(reference_C0.at({m, n}) - ElementLayernormCompute(mean)) * ElementLayernormCompute(reference_C0.at({m, n}) - ElementLayernormCompute(mean)) ; } ElementLayernormCompute square_mean = sum / ElementLayernormCompute(options.problem_size0.m()); ElementInputScaleBias variance = ElementInputScaleBias(cutlass::constants::one<ElementLayernormCompute>() / cutlass::fast_sqrt(square_mean + ElementLayernormCompute(1e-6))) ; for (int m = 0; m < options.problem_size0.m(); ++m) { reference_C0.at({m, n}) = ElementOutput( ( (ElementInputScaleBias(reference_C0.at({m, n})) - mean) * variance ) * tensor_Gamma.at({0, m}) + tensor_Beta.at({0, m})); } } }else{ for (int m = 0; m < options.problem_size0.m(); ++m) { float sum = float(0); for (int n = 0; n < options.problem_size0.n(); ++n) { sum += float(reference_C0.at({m, n})) ; } float mean = sum / float(options.problem_size0.n()); sum = float(0); for (int n = 0; n < options.problem_size0.n(); ++n) { sum += float(reference_C0.at({m, n}) - mean) * float(reference_C0.at({m, n}) - mean) ; } float square_mean = sum / float(options.problem_size0.n()); float variance = cutlass::constants::one<float>() / cutlass::fast_sqrt(square_mean + ElementLayernormCompute(1e-6)) ; for (int n = 0; n < options.problem_size0.n(); ++n) { reference_C0.at({m, n}) = ElementOutput( ( (float(reference_C0.at({m, n})) - mean) * variance ) * float(tensor_Gamma.at({0, n})) + float(tensor_Beta.at({0, n}))); } } } // Sync host data with device after element-wise transform reference_C0.sync_device(); // Compute 2nd GEMM gemm_device1( options.problem_size1, ElementCompute(options.alpha), kIsColumnMajorOutput ? tensor_A1.device_ref() : reference_C0.device_ref(), kIsColumnMajorOutput ? reference_C0.device_ref() :tensor_A1.device_ref(), ElementCompute(options.beta), reference_C1.device_ref(), reference_C1.device_ref() ); } /// Emits all tensor values void emit_results() { std::cout << "tensor_C1 = \n" << tensor_C1.host_view() << "\n\n"; std::cout << "Reference C1 = \n" << reference_C1.host_view() << "\n\n"; std::cout << "Mean = \n" << tensor_Mean.host_view() << "\n\n"; std::cout << "rsqrt(Variance) = \n" << tensor_Variance.host_view() << "\n\n"; std::cout << "Reference Mean = \n" << reference_Mean.host_view() << "\n\n"; std::cout << "Reference rsqrt(Variance) = \n" << reference_Variance.host_view() << "\n\n"; } template<typename Element, typename Layout> bool verify_tensor(cutlass::HostTensor<Element, Layout> tensor, \ cutlass::HostTensor<Element, Layout> reference, int leading_dim0, int leading_dim1, bool is_print = false) { float const kThreshold = float(options.tolerance); float const kAbsThreshold = 0.5f; float const kRelativeThreshold = 0.1f; // Adds a constant bias to avoid being divided by '0' float const kBias = 1e-5f; int counter = 0; for (int m = 0; m < leading_dim0; m++) { for (int n = 0; n < leading_dim1; ++n) { float diff = (float)(tensor.at({m, n}) - reference.at({m, n})); float rel_diff = fabs(diff) / fabs(reference.at({m, n}) + kBias); if (fabs(diff) > kAbsThreshold && rel_diff > kRelativeThreshold) { counter++; } } } float err_rate = float(counter) / (float(leading_dim0) * float(leading_dim1)); return (err_rate < kThreshold); } /// Verifies the reference matches bool verify() { tensor_Variance.sync_host(); tensor_Mean.sync_host(); tensor_C1.sync_host(); reference_C1.sync_host(); // Verification checks - set any of these to 'true' to override the verification checks. 
bool verified_C1 = false; bool verified_Mean = false; bool verified_Variance = false; // Verify layernorm output if (!verified_C1) { verified_C1 = verify_tensor<ElementOutputC1, LayoutOutputC1>(tensor_C1, reference_C1, options.problem_size1.m(), options.problem_size1.n()); } if (!verified_Variance) { verified_Variance = verify_tensor<ElementInputScaleBias, LayoutInputScaleBias>(tensor_Variance, reference_Variance, 1, options.problem_size0.n()); } if (!verified_Mean) { verified_Mean = verify_tensor<ElementInputScaleBias, LayoutInputScaleBias>(tensor_Mean, reference_Mean, 1, options.problem_size0.n()); } if (!verified_C1 || !verified_Mean || !verified_Variance) { // emit_results(); std::cerr << "Verification check failed for tensor Layernorm" << std::endl; // Summarize which checks failed if (!verified_C1) { std::cerr << "Verification of O tensor failed\n"; } if (!verified_Mean) { std::cerr << "Verification of Mean tensor failed\n"; } if (!verified_Variance) { std::cerr << "Verification of Variance tensor failed\n"; } return false; } return true; } /// Profiles bool profile() { // // Profile // cutlass::Status status = cutlass::Status::kSuccess; cudaError_t result; cudaEvent_t events[2]; int const kIterations = options.iterations; for (cudaEvent_t &evt : events) { result = cudaEventCreate(&evt); if (result != cudaSuccess) { std::cerr << "cudaEventCreate failed with error " << cudaGetErrorString(result) << std::endl; return false; } } result = cudaEventRecord(events[0]); if (result != cudaSuccess) { std::cerr << "cudaEventRecord() failed with error " << cudaGetErrorString(result) << std::endl; return false; } for (int iter = 0; iter < kIterations; ++iter) { status = execute_device_kernel(); if (status != cutlass::Status::kSuccess) { std::cerr << "Device execution failed." 
<< std::endl; return false; } } result = cudaEventRecord(events[1]); if (result != cudaSuccess) { std::cerr << "cudaEventRecord() failed with error " << cudaGetErrorString(result) << std::endl; return false; } result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cerr << "cudaDeviceSynchronize() failed with error " << cudaGetErrorString(result) << std::endl; return false; } float elapsed_ms = 0; result = cudaEventElapsedTime(&elapsed_ms, events[0], events[1]); float elapsed_ms_per_iter = elapsed_ms / float(kIterations); if (result != cudaSuccess) { std::cerr << "cudaEventElapsedTime() failed with error " << cudaGetErrorString(result) << std::endl; return false; } for (cudaEvent_t &evt : events) { result = cudaEventDestroy(evt); if (result != cudaSuccess) { std::cerr << "cudaEventDestroy() failed with error " << cudaGetErrorString(result) << std::endl; return false; } } int64_t flops = int64_t(options.problem_size0.m()) * options.problem_size0.n() * options.problem_size0.k() * 2 \ + int64_t(options.problem_size1.m()) * options.problem_size1.n() * options.problem_size1.k() * 2; double gflops_per_second = double(flops) * kIterations / double(elapsed_ms / 1000.0f) / double(1.0e9); std::cout << " 1st GEMM: " << options.problem_size0.m() << "-by-" << options.problem_size0.n() << "-by-" << options.problem_size0.k() << "\n" << " 2nd GEMM: " << options.problem_size1.m() << "-by-" << options.problem_size1.n() << "-by-" << options.problem_size1.k() << std::endl; std::cout << " Runtime / iteration: " << elapsed_ms_per_iter << " ms\n" << std::endl; std::cout << " GFLOPs: " << gflops_per_second << " GFLOPs" << std::endl; return true; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, const char **argv) { // Define final layout using LayoutOutput = cutlass::layout::ColumnMajor; // Options parsing Options<LayoutOutput> options; options.parse(argc, argv); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (!options.supported()) { return 0; } // Run Testbed<LayoutOutput> testbed(options); Disposition disposition = testbed.run(); std::cout << std::endl; switch (disposition) { case Disposition::kPassed: std::cout << "Passed" << std::endl; break; case Disposition::kIncorrect: std::cout << "Incorrect" << std::endl; break; case Disposition::kNotVerified: std::cout << "Not verified" << std::endl; break; } return (disposition == Disposition::kPassed ? 0 : -1); } /////////////////////////////////////////////////////////////////////////////////////////////////
examples/37_gemm_layernorm_gemm_fusion/gemm_layernorm.cu/0
{ "file_path": "examples/37_gemm_layernorm_gemm_fusion/gemm_layernorm.cu", "repo_id": "examples", "token_count": 12217 }
6
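The compute_reference routine in the layernorm fusion example above relies on the single-pass identity Var[x] = E[x^2] - E[x]^2 and folds the mean into a bias (mean = -mean * rstd) so the fused epilogue needs only one multiply-add per element; the two-pass formula is the numerically safer alternative the file's own comments allude to. The NumPy sketch below restates both on the host purely as an illustration; it is not the CUTLASS device code, and the 1e-6 epsilon simply mirrors the constant used in the reference.

import numpy as np

def one_pass_layernorm(x, gamma, beta, eps=1e-6):
    """Single-pass (square-sum) statistics, computed per row as in compute_reference."""
    mean = x.mean(axis=-1, keepdims=True)
    square_mean = (x * x).mean(axis=-1, keepdims=True)
    rstd = 1.0 / np.sqrt(square_mean - mean * mean + eps)  # 1 / sqrt(E[x^2] - E[x]^2 + eps)
    shifted_mean = -mean * rstd                            # folded so y = x * rstd + shifted_mean
    return (x * rstd + shifted_mean) * gamma + beta

def two_pass_layernorm(x, gamma, beta, eps=1e-6):
    """Conventional two-pass variance; numerically more stable for large inputs."""
    mean = x.mean(axis=-1, keepdims=True)
    var = ((x - mean) ** 2).mean(axis=-1, keepdims=True)
    return (x - mean) / np.sqrt(var + eps) * gamma + beta

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.standard_normal((4, 768)).astype(np.float32)
    gamma = np.ones(768, dtype=np.float32)
    beta = np.zeros(768, dtype=np.float32)
    # The two paths agree closely for well-scaled data; they diverge as values grow.
    print(np.max(np.abs(one_pass_layernorm(x, gamma, beta) - two_pass_layernorm(x, gamma, beta))))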
################################################################################ # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################ """ Basic example of using the CUTLASS Python interface to run a GEMM """ import sys print("This example is deprecated. Please see examples/python for examples of using " "the CUTLASS Python interface.") sys.exit(0) import argparse import numpy as np import cutlass_bindings import cutlass.backend as pycutlass from cutlass.backend import * from cutlass.backend.utils.device import device_cc parser = argparse.ArgumentParser(description="Launch a GEMM kernel from Python: 'D = alpha * A * B + beta * C'") parser.add_argument("--m", default=128, type=int, help="M dimension of the GEMM") parser.add_argument("--n", default=128, type=int, help="N dimension of the GEMM") parser.add_argument("--k", default=128, type=int, help="K dimension of the GEMM") parser.add_argument('--print_cuda', action="store_true", help="Print the underlying CUDA kernel") try: args = parser.parse_args() except: sys.exit(0) # Check that the device is of a sufficient compute capability cc = device_cc() assert cc >= 70, "The CUTLASS Python GEMM example requires compute capability greater than or equal to 70." 
alignment = 8 assert args.m % alignment == 0, "M dimension of size {} is not divisible by alignment of {}".format(args.m, alignment) assert args.n % alignment == 0, "N dimension of size {} is not divisible by alignment of {}".format(args.n, alignment) assert args.k % alignment == 0, "K dimension of size {} is not divisible by alignment of {}".format(args.k, alignment) np.random.seed(0) # Allocate a pool of device memory to be used by the kernel pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32) # Set the compiler to use to NVCC pycutlass.compiler.nvcc() # Set up A, B, C and accumulator A = TensorDescription(cutlass_bindings.float16, cutlass_bindings.ColumnMajor, alignment) B = TensorDescription(cutlass_bindings.float16, cutlass_bindings.RowMajor, alignment) C = TensorDescription(cutlass_bindings.float32, cutlass_bindings.ColumnMajor, alignment) element_acc = cutlass_bindings.float32 element_epilogue = cutlass_bindings.float32 # Select instruction shape based on the Tensor Core instructions supported # by the device on which we are running if cc == 70: instruction_shape = [8, 8, 4] elif cc == 75: instruction_shape = [16, 8, 8] else: # Use CUTLASS kernels for CC 80 by default (e.g., for cases in which SM86 is used) cc = 80 instruction_shape = [16, 8, 16] math_inst = MathInstruction( instruction_shape, A.element, B.element, element_acc, cutlass_bindings.OpClass.TensorOp, MathOperation.multiply_add ) tile_description = TileDescription( [128, 128, 32], # Threadblock shape 2, # Number of stages [2, 2, 1], # Number of warps within each dimension of the threadblock shape math_inst ) epilogue_functor = pycutlass.LinearCombination(C.element, C.alignment, element_acc, element_epilogue) operation = GemmOperationUniversal( arch=cc, tile_description=tile_description, A=A, B=B, C=C, epilogue_functor=epilogue_functor) if args.print_cuda: print(operation.rt_module.emit()) operations = [operation, ] # Compile the operation pycutlass.compiler.add_module(operations) # Randomly initialize tensors tensor_A = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(args.m * args.k,))).astype(np.float16) tensor_B = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(args.k * args.n,))).astype(np.float16) tensor_C = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(args.m * args.n,))).astype(np.float32) tensor_D = np.zeros(shape=(args.m * args.n,)).astype(np.float32) problem_size = cutlass_bindings.gemm.GemmCoord(args.m, args.n, args.k) alpha = 1. beta = 0. arguments = GemmArguments( operation=operation, problem_size=problem_size, A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D, output_op=operation.epilogue_type(alpha, beta)) # Run the operation operation.run(arguments) arguments.sync() # Run the host reference module and compare to the CUTLASS result reference = ReferenceModule(A, B, C) tensor_D_ref = reference.run(tensor_A, tensor_B, tensor_C, problem_size, alpha, beta) try: assert np.array_equal(tensor_D, tensor_D_ref) except: assert np.allclose(tensor_D, tensor_D_ref, atol=1e-5) print("Passed.")
examples/40_cutlass_py/gemm.py/0
{ "file_path": "examples/40_cutlass_py/gemm.py", "repo_id": "examples", "token_count": 1993 }
7
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a double-buffered threadblock-scoped GEMM kernel. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/cache_operation.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "custom_mma_base.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Use zfill or predicate for out-of-bound cp.async SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone, /// Upper boundon the K dimension int kMaxK = cutlass::platform::numeric_limits<int>::max(), /// Used for partial specialization typename Enable = bool> class CustomMmaMultistage : public CustomMmaBase<Shape_, Policy_, Stages> { public: ///< Base class using Base = CustomMmaBase<Shape_, Policy_, Stages>; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA_; ///< Iterates over tiles of B operand in global memory using IteratorB = IteratorB_; ///< Data type of accumulator matrix using ElementC = ElementC_; ///< Layout of accumulator matrix using LayoutC = LayoutC_; ///< Policy describing tuning details using Policy = Policy_; using SmemIteratorA = SmemIteratorA_; using SmemIteratorB = SmemIteratorB_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; // // Dependent types // /// Fragment of accumulator tile using FragmentC = typename Policy::Operator::FragmentC; /// Warp-level Mma using Operator = typename Policy::Operator; /// Minimum architecture is Sm80 to support cp.async using ArchTag = arch::Sm80; /// Complex transform on A operand static ComplexTransform const kTransformA = Operator::kTransformA; /// Complex transform on B operand static ComplexTransform const kTransformB = Operator::kTransformB; /// Internal structure exposed for introspection. 
struct Detail { static_assert( Base::kWarpGemmIterations > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); /// Number of cp.async instructions to load one stage of operand A static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB = IteratorB::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load on group of operand A static int const kAccessesPerGroupA = (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB = (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; }; static bool const kSmemContainsEntireMat = kMaxK <= Shape::kK * Stages; static constexpr int kNumStagesConcurrentLoad = kSmemContainsEntireMat ? Stages : Stages - 1; private: using WarpLoadedFragmentA = typename Operator::FragmentA; using WarpLoadedFragmentB = typename Operator::FragmentB; using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB smem_iterator_B_; bool prologue_done_; // Set to `True` to ensure the accumulator will be zero outside the GEMM // footprint bool zero_outside_bounds_; public: /// Construct from tensor references CUTLASS_DEVICE CustomMmaMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::SharedStorageA& shared_storageA, typename Base::SharedStorageB& shared_storageB, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : Base(shared_storageA, shared_storageB, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storageA.ref(), thread_idx), smem_iterator_B_(shared_storageB.ref(), thread_idx), prologue_done_(false), zero_outside_bounds_(false) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); } CUTLASS_DEVICE CustomMmaMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::SharedStorage& st, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : CustomMmaMultistage( st.operand_A, st.operand_B, thread_idx, warp_idx, lane_idx) {} CUTLASS_DEVICE bool set_prologue_done(bool 
value) { prologue_done_ = value; return true; } CUTLASS_DEVICE bool set_zero_outside_bounds(bool value) { zero_outside_bounds_ = value; return true; } template <bool kLoadA = true, bool kLoadB = true> CUTLASS_DEVICE static void prologue( typename Base::SharedStorage& shared_storage, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB iterator_B, int thread_idx, int problem_size_k) { prologue<kLoadA, kLoadB>( shared_storage.operand_A, shared_storage.operand_B, iterator_A, iterator_B, thread_idx, problem_size_k); } template <bool kLoadA = true, bool kLoadB = true> CUTLASS_DEVICE static void prologue( typename Base::SharedStorageA& shared_storageA, typename Base::SharedStorageB& shared_storageB, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB iterator_B, int thread_idx, int problem_size_k) { SmemIteratorA smem_iterator_A(shared_storageA.ref(), thread_idx); SmemIteratorB smem_iterator_B(shared_storageB.ref(), thread_idx); int32_t iter = (problem_size_k + Base::Shape::kK - 1) / Base::Shape::kK; _prologue<kLoadA, kLoadB>( iterator_A, iterator_B, iter, smem_iterator_A, smem_iterator_B); } CUTLASS_DEVICE void copy_tiles_and_advance( IteratorA& iterator_A, IteratorB& iterator_B, int group_start_A = 0, int group_start_B = 0) { iterator_A.set_iteration_index( group_start_A * IteratorA::kAccessesPerVector); this->smem_iterator_A_.set_iteration_index(group_start_A); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { typename IteratorA::AccessType* dst_ptr = reinterpret_cast<typename IteratorA::AccessType*>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_A.get(); if (zero_outside_bounds_ || SharedMemoryClear == SharedMemoryClearOption::kZfill) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, gmem_ptr, iterator_A.valid()); } else { cutlass::arch::cp_async<kSrcBytes, kCacheOpA>( dst_ptr + v, gmem_ptr, iterator_A.valid()); } ++iterator_A; } ++this->smem_iterator_A_; } } iterator_B.set_iteration_index( group_start_B * IteratorB::kAccessesPerVector); this->smem_iterator_B_.set_iteration_index(group_start_B); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { typename IteratorB::AccessType* dst_ptr = reinterpret_cast<typename IteratorB::AccessType*>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_B.get(); if (zero_outside_bounds_ || SharedMemoryClear == SharedMemoryClearOption::kZfill) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, iterator_B.valid()); } else { cutlass::arch::cp_async<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, iterator_B.valid()); } ++iterator_B; } ++this->smem_iterator_B_; } } } template <bool kLoadA = true, bool kLoadB = true> CUTLASS_DEVICE static void _prologue( IteratorA& iterator_A, 
IteratorB& iterator_B, int32_t& gemm_k_iterations, SmemIteratorA& smem_iterator_A_, SmemIteratorB& smem_iterator_B_) { // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < kNumStagesConcurrentLoad; ++stage, --gemm_k_iterations) { iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); iterator_A.set_iteration_index(0); smem_iterator_A_.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { typename IteratorA::AccessType* dst_ptr = reinterpret_cast<typename IteratorA::AccessType*>( smem_iterator_A_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; int src_bytes = (iterator_A.valid() ? kSrcBytes : 0); if (kLoadA) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, iterator_A.get(), iterator_A.valid()); } ++iterator_A; } ++smem_iterator_A_; } iterator_B.set_iteration_index(0); smem_iterator_B_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB::AccessType* dst_ptr = reinterpret_cast<typename IteratorB::AccessType*>( smem_iterator_B_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; if (kLoadB) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, iterator_B.get(), iterator_B.valid()); } ++iterator_B; } ++smem_iterator_B_; } // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_B.add_tile_offset({1, 0}); smem_iterator_A_.add_tile_offset({0, 1}); smem_iterator_B_.add_tile_offset({1, 0}); // Defines the boundary of a stage of cp.async. cutlass::arch::cp_async_fence(); } } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations, ///< destination accumulator tile FragmentC& accum, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB iterator_B, ///< initial value of accumulator FragmentC const& src_accum) { // // Prologue // if (!prologue_done_) { _prologue<true, true>( iterator_A, iterator_B, gemm_k_iterations, smem_iterator_A_, smem_iterator_B_); } else if (!kSmemContainsEntireMat) { _prologue<false, false>( iterator_A, iterator_B, gemm_k_iterations, smem_iterator_A_, smem_iterator_B_); } else { gemm_k_iterations -= kNumStagesConcurrentLoad; } // Perform accumulation in the 'd' output operand accum = src_accum; // // Clear the remaining tiles of SMEM. This is a functional requirement for // some kernels so that all accumulator elements outside the GEMM footprint // are zero. 
// if (SharedMemoryClear == SharedMemoryClearOption::kClearLastStage) { /// Iterator to write threadblock-scoped tile of A operand to shared /// memory SmemIteratorA last_smem_iterator_A(this->smem_iterator_A_); typename IteratorA::AccessType zero_A; zero_A.clear(); last_smem_iterator_A.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { typename IteratorA::AccessType* dst_ptr = reinterpret_cast<typename IteratorA::AccessType*>( last_smem_iterator_A.get()); *dst_ptr = zero_A; ++last_smem_iterator_A; } /// Iterator to write threadblock-scoped tile of B operand to shared /// memory SmemIteratorB last_smem_iterator_B(this->smem_iterator_B_); typename IteratorB::AccessType zero_B; zero_B.clear(); last_smem_iterator_B.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB::AccessType* dst_ptr = reinterpret_cast<typename IteratorB::AccessType*>( last_smem_iterator_B.get()); *dst_ptr = zero_B; ++last_smem_iterator_B; } } // Waits until kStages-2 stages have committed. cutlass::arch::cp_async_wait<kNumStagesConcurrentLoad - 1>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA warp_loaded_frag_A[2]; WarpLoadedFragmentB warp_loaded_frag_B[2]; WarpTransformedFragmentA warp_transformed_frag_A[2]; WarpTransformedFragmentB warp_transformed_frag_B[2]; Operator warp_mma; this->warp_tile_iterator_A_.set_kgroup_index(0); this->warp_tile_iterator_B_.set_kgroup_index(0); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma.transform( warp_transformed_frag_A[0], warp_transformed_frag_B[0], warp_loaded_frag_A[0], warp_loaded_frag_B[0]); // tf32x3 kernels use staging accumulation. warp_mma uses a temporary // accumulator and this temporary accumulator is added to the final // accumulator once in every mainloop iteration. plus<FragmentC> plus_accum; FragmentC tmp_accum; if (platform::is_same< typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same< typename Operator::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { tmp_accum.clear(); } // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations > (-kNumStagesConcurrentLoad);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. this->warp_tile_iterator_A_.set_kgroup_index( (warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B_.set_kgroup_index( (warp_mma_k + 1) % Base::kWarpGemmIterations); // In case of a non-circular buffer ("kSmemContainsEntireMat") // make sure we don't load out of bounds data. 
if (!kSmemContainsEntireMat || gemm_k_iterations > (-kNumStagesConcurrentLoad) || warp_mma_k < Base::kWarpGemmIterations - 1) { this->warp_tile_iterator_A_.load( warp_loaded_frag_A[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B_.load( warp_loaded_frag_B[(warp_mma_k + 1) % 2]); } ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B_; if (warp_mma_k > 0) warp_mma.transform( warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], warp_loaded_frag_A[warp_mma_k % 2], warp_loaded_frag_B[warp_mma_k % 2]); if (platform::is_same< typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same< typename Operator::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { warp_mma( tmp_accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], tmp_accum); if (warp_mma_k == 0) { accum = plus_accum(accum, tmp_accum); tmp_accum.clear(); } } else { warp_mma( accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], accum); } // Issue global->shared copies for the this stage if (!kSmemContainsEntireMat && warp_mma_k < Base::kWarpGemmIterations - 1) { int group_start_iteration_A, group_start_iteration_B; group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA; group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB; copy_tiles_and_advance( iterator_A, iterator_B, group_start_iteration_A, group_start_iteration_B); } if (warp_mma_k + 2 == Base::kWarpGemmIterations) { if (!kSmemContainsEntireMat) { int group_start_iteration_A, group_start_iteration_B; group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; copy_tiles_and_advance( iterator_A, iterator_B, group_start_iteration_A, group_start_iteration_B); } // Inserts a memory fence between stages of cp.async instructions. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages have committed. 
cutlass::arch::cp_async_wait<kNumStagesConcurrentLoad - 1>(); __syncthreads(); // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_B.add_tile_offset({1, 0}); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (!kSmemContainsEntireMat && smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_B_.add_tile_offset( {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); } // Do any conversions feeding the first stage at the end of the loop so // we can start right away on mma instructions if (warp_mma_k + 1 == Base::kWarpGemmIterations) warp_mma.transform( warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_transformed_frag_B[(warp_mma_k + 1) % 2], warp_loaded_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_B[(warp_mma_k + 1) % 2]); } } if (platform::is_same< typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same< typename Operator::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { accum = plus_accum(accum, tmp_accum); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
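// Hedged illustrative addition (not part of the original header): a minimal host-side
// sketch of the circular shared-memory stage bookkeeping used in
// CustomMmaMultistage::operator() above. The prologue pre-fills Stages - 1 buffers,
// the mainloop then advances a write stage and a read stage once per threadblock K
// tile, and both indices wrap back to 0 after reaching kStages - 1, so the write
// stage stays kStages - 1 ahead of the read stage. The kStages value and the
// iteration count below are illustrative assumptions; compile this separately
// (e.g. g++ -std=c++17) rather than as part of the header.
#if 0
#include <cstdio>

int main() {
  constexpr int kStages = 3;               // assumed stage count for illustration
  int smem_write_stage_idx = kStages - 1;  // prologue already filled kStages - 1 stages
  int smem_read_stage_idx = 0;
  for (int k_tile = 0; k_tile < 8; ++k_tile) {
    // Producer side: wrap the write index exactly as the mainloop does.
    smem_write_stage_idx = (smem_write_stage_idx == kStages - 1) ? 0 : smem_write_stage_idx + 1;
    // Consumer side: the read index advances the same way, trailing by kStages - 1 stages.
    smem_read_stage_idx = (smem_read_stage_idx == kStages - 1) ? 0 : smem_read_stage_idx + 1;
    std::printf("k_tile %d: write stage %d, read stage %d\n",
                k_tile, smem_write_stage_idx, smem_read_stage_idx);
  }
  return 0;
}
#endif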
examples/41_fused_multi_head_attention/gemm/custom_mma_multistage.h/0
{ "file_path": "examples/41_fused_multi_head_attention/gemm/custom_mma_multistage.h", "repo_id": "examples", "token_count": 11480 }
8
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# from typing import * import helper import gen_ir import gen_kernel as gen_ker class gen_device: def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, cutlass_deps_root, project_root, output_dir = "../"): self.fuse_gemm_info = fuse_gemm_info self.raw_gemm_info = fuse_gemm_info self.b2b_num = len(fuse_gemm_info) self.user_header_file = user_header_file self.args = {} # device arg struct memebr self.arg_member = [] self.gen_class_name = gen_class_name self.gen_kernel_name = gen_class_name + "Kernel" self.template_args = [] self.__tempalate_arg_list = {'Stages': int, 'SplitKSerial': bool, 'IsBetaZero': bool, 'AlignmentA': int, 'AlignmentB': int} self.file_name = output_dir + "/device/" +gen_class_name +".h" self.sample_dir = output_dir self.cutlass_deps_root = cutlass_deps_root self.project_root = project_root self.this_file_root = output_dir + "/device/" self.first_use_1stage = False ## gen kernel self.gen_kernel = gen_ker.gen_kernel(self.template_args, self.gen_class_name, self.b2b_num, output_dir, cutlass_deps_root, project_root) def __check_arg_type(self, temp_arg): if temp_arg in self.__tempalate_arg_list.keys(): return self.__tempalate_arg_list[temp_arg] find_sub = False for candidate_arg in self.__tempalate_arg_list.keys(): if (temp_arg.find(candidate_arg) != -1): return self.__tempalate_arg_list[candidate_arg] return 'typename' # def gen_B2b2bGemm_class(): def set_arch(self, sm_cap, mma_tp): if sm_cap == 75 or sm_cap == 80 or sm_cap == 86: self.arch = "cutlass::arch::Sm" + str(sm_cap) if mma_tp is 'hmma1688': self.mma_shape = [16, 8, 8] self.mma_tp = 'hmma' elif mma_tp is 'imma8816': self.mma_tp = 'imma' self.mma_shape = [8, 8, 16] else: return 0 def gen_include_header(self): code = '''\ /* Auto Generated code 
- Do not edit.*/ #pragma once #include \"{cutlass_root}cutlass/cutlass.h\" #include \"{cutlass_root}cutlass/numeric_types.h\" #include \"{cutlass_root}cutlass/arch/arch.h\" #include \"{cutlass_root}cutlass/device_kernel.h\" #include \"{cutlass_root}cutlass/gemm/threadblock/threadblock_swizzle.h\" #include \"{cutlass_root}cutlass/gemm/device/default_gemm_configuration.h\" #include \"{cutlass_root}cutlass/epilogue/thread/linear_combination_relu.h\" #include \"{cutlass_root}cutlass/epilogue/thread/linear_combination.h\" #include \"{project_root}../kernel/b2b_gemm.h\" #include \"{project_root}../kernel/default_b2b_gemm.h\" '''.format(cutlass_root=self.cutlass_deps_root, project_root=self.project_root, this_file_root=self.this_file_root) include_user_header = "" for header in self.user_header_file: include_user_header += "#include \"" + header + "\"\n" return code + include_user_header def gen_code(self, sm_cap, mma_tp, ifprint = True): self.set_arch(sm_cap, mma_tp) self.update_b2b_args() print(self.fuse_gemm_info) self.update_b2b_class_template_args() func_code = self.gen_all_func() member_var_code = "private:\n typename B2bGemmKernel::Params params_;\n" gen_code = gen_ir.gen_template_class(self.gen_class_name, self.template_args, func_code + member_var_code) code = self.gen_include_header() + gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("device", gen_code))) if ifprint: print(code) print("[INFO]: Gen device code output Dir: is ", self.file_name) with open(self.file_name, 'w+') as f: f.write(code) gen_kernel = self.gen_kernel.gen_code(self.first_use_1stage) print(gen_kernel) def update_b2b_class_template_args(self): for arg in self.args.keys(): self.template_args.append([self.__check_arg_type(arg), arg, self.args[arg]]) def update_b2b_args(self): self.args['ElementA'] = helper.type_2_cutlass_type(self.fuse_gemm_info[0]['A_tp']) self.args['LayoutA'] = helper.type_2_cutlass_type(self.fuse_gemm_info[0]['A_format']) cnt = 0 warp_M_tile = 32 # Determine maxmimum N_tile Max_Ntile = 0 for layer in self.fuse_gemm_info: n_tile = layer['mnk'][1] if n_tile > Max_Ntile: Max_Ntile = n_tile if Max_Ntile >= 256: warp_M_tile = 16 stages_temp = [] for layer in self.fuse_gemm_info: cnt_str = str(cnt) B_tp_str= 'ElementB' + cnt_str B_format_str = 'LayoutB' + cnt_str C_tp_str= 'ElementC' + cnt_str C_format_str = 'LayoutC' + cnt_str Acc_str = 'ElementAccumulator' + cnt_str self.args[B_tp_str] = helper.type_2_cutlass_type(layer['B_tp']) self.args[B_format_str] = helper.type_2_cutlass_type(layer['B_format']) self.args[C_tp_str] = helper.type_2_cutlass_type(layer['C_tp']) self.args[C_format_str] = helper.type_2_cutlass_type(layer['C_format']) self.args[Acc_str] = helper.type_2_cutlass_type(layer['Acc_tp']) mnk = layer['mnk'][:] tile_mnk = mnk[:] tile_mnk[2] = 32 # force the ktile is 32 #N tile gen if mnk[1] > 1024: assert(0) elif mnk[1] > 512: tile_mnk[1] = 1024 elif mnk[1] > 256: tile_mnk[1] = 512 elif mnk[1] > 128: tile_mnk[1] = 256 elif mnk[1] > 64: tile_mnk[1] = 128 elif mnk[1] > 32: tile_mnk[1] = 64 else : tile_mnk[1] = 32 if tile_mnk[1] == 512: stages_temp.append(1) else: stages_temp.append(2) tile_mnk[0] = 4 * warp_M_tile epilogue_setted_type = helper.get_epilogue_tp(layer) cutlass_epilogue_name = "LinearCombinationRelu" if epilogue_setted_type.lower() == 'leakyrelu': cutlass_epilogue_name = "LinearCombinationLeakyRelu" elif epilogue_setted_type.lower() == 'identity': cutlass_epilogue_name = "LinearCombination" epilogue_str = 'EpilogueOutputOp' + cnt_str if cnt != 
len(self.fuse_gemm_info) - 1: n = layer['mnk'][1] Fragments = tile_mnk[1] // 8 * 2 self.args[epilogue_str] = "cutlass::epilogue::thread::" + cutlass_epilogue_name + "<ElementC0_, " + str(Fragments) +", ElementAccumulator0_, ElementAccumulator0_>" else: n = layer['mnk'][1] n_mod_8 = n % 4 N_align_elements = 1 if n_mod_8 == 0: N_align_elements = 8 elif n_mod_8 == 4: N_align_elements = 4 elif n_mod_8 == 2 or n_mod_8 == 6: N_align_elements = 2 self.args[epilogue_str] = "cutlass::epilogue::thread::" + cutlass_epilogue_name+ "<ElementC0_, " + str(N_align_elements) + ", ElementAccumulator0_, ElementAccumulator0_>" ThreadBlockShape_str = 'ThreadblockShape' + cnt_str self.args[ThreadBlockShape_str] = helper.cvt_2_cutlass_shape(tile_mnk) WarpShape_str = 'WarpShape' + cnt_str tile_mnk[0] = warp_M_tile self.args[WarpShape_str] = helper.cvt_2_cutlass_shape(tile_mnk) cnt += 1 self.args['ElementD'] = helper.type_2_cutlass_type(self.fuse_gemm_info[self.b2b_num - 1]['C_tp']) self.args['LayoutD'] = helper.type_2_cutlass_type(self.fuse_gemm_info[self.b2b_num - 1]['C_format']) self.args['InstructionShape'] = helper.cvt_2_cutlass_shape(self.mma_shape) self.args['OperatorClass'] = 'arch::OpClassTensorOp' self.args['ArchTag'] = self.arch self.args['ThreadblockSwizzle'] = 'threadblock::GemmBatchedIdentityThreadblockSwizzle' for i in range(self.b2b_num): self.args[helper.var_idx('Stages', i)] = "2" self.args['AlignmentA'] = str(8) self.args['AlignmentB'] = str(8) self.args['SplitKSerial'] = 'false' self.args['Operator'] = 'typename DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB0_, ElementC0_, ElementAccumulator0_>::Operator' self.args['IsBetaZero'] = 'false' def gen_using_kernel(self): code = "using B2bGemmKernel = typename kernel::DefaultB2bGemm<\n" code += " " + "ElementA,\n" code += " " + "LayoutA,\n" for i in range(self.b2b_num): code += " " + helper.var_idx("ElementB", i) + ",\n" code += " " + helper.var_idx("LayoutB", i) + ",\n" code += " " + helper.var_idx("ElementC", i) + ",\n" code += " " + helper.var_idx("LayoutC", i) + ",\n" code += " " + helper.var_idx("ElementAccumulator", i) + ",\n" code += " " + helper.var_idx("EpilogueOutputOp", i) + ",\n" code += " " + helper.var_idx("ThreadblockShape", i) + ",\n" code += " " + helper.var_idx("WarpShape", i) + ",\n" code += " " + "ElementD,\n" code += " " + "LayoutD,\n" code += " " + "InstructionShape,\n" code += " " + "OperatorClass,\n" code += " " + "ArchTag,\n" code += " " + "ThreadblockSwizzle,\n" for i in range(self.b2b_num): code += " " + helper.var_idx("Stages", i) + ",\n" code += " " + "AlignmentA,\n" code += " " + "AlignmentB,\n" code += " " + "SplitKSerial,\n" code += " " + "Operator,\n" code += " " + "IsBetaZero_\n" code += ">::B2bGemmKernel;\n\n" return code def gen_args(self): def gen_arg_member(b2b_num): data_members = [] for i in range(b2b_num): member_type = "GemmCoord" member_name = "problem_size_" + str(i) data_members.append((member_type, member_name)) member_type = "TensorRef<ElementA const, LayoutA>" member_name = "ref_A0" data_members.append((member_type, member_name)) for i in range(b2b_num): member_type = "TensorRef<ElementB" + str(i) + " const, LayoutB" + str(i) +">" member_name = "ref_B" + str(i) data_members.append((member_type, member_name)) member_type = "TensorRef<ElementC" + str(i) + " const, LayoutC" + str(i) +">" member_name = "ref_C" + str(i) data_members.append((member_type, member_name)) member_type = "TensorRef<ElementD, LayoutD>" member_name = helper.var_idx("ref_D", b2b_num - 1) 
data_members.append((member_type, member_name)) for i in range(b2b_num): member_type = "typename EpilogueOutputOp" + str(i) + "::Params" member_name = "epilogue" + str(i) data_members.append((member_type, member_name)) data_members.append(('int', 'batch_count')) return data_members def gen_arg_struct_default_ctor(struct_name, data_members, inital_param_num, inital_value): constructs_code = gen_ir.indentation + "CUTLASS_HOST_DEVICE\n" + \ gen_ir.indentation + struct_name + " (): " for i in range(inital_param_num): final_param = ',' if i == inital_param_num - 1: final_param = '{ }' constructs_code += data_members[i][1] + inital_value + final_param constructs_code += "\n" return constructs_code def gen_arg_struct_ctor(struct_name, data_members): constructs_code = gen_ir.indentation + "CUTLASS_HOST_DEVICE\n" + \ gen_ir.indentation + struct_name + " (\n" cnt = 0 param_num = len(data_members) for param in data_members: final = ',\n' if cnt == param_num - 1: final = '\n):\n' constructs_code += gen_ir.indentation + param[0] + " " + param[1] + "_" + final cnt += 1 cnt = 0 for param in data_members: final = '),\n' if cnt == param_num - 1: final = ") { }\n" constructs_code += gen_ir.indentation + param[1] + "(" + param[1] + "_" + final cnt += 1 constructs_code += "\n" return constructs_code # (variable type, variable name) struct_member = gen_arg_member(self.b2b_num) self.arg_member = struct_member codeBody = "" for each_member in struct_member: codeBody += gen_ir.indentation + each_member[0] + " " + each_member[1] + ";\n" codeBody += gen_arg_struct_default_ctor("Arguments", struct_member, self.b2b_num, "(0,0,0)") + "\n" codeBody += gen_arg_struct_ctor("Arguments", struct_member) + "\n" struct_code = gen_ir.gen_struct("Arguments", codeBody) return struct_code def gen_func_constructs(self): code = self.gen_class_name +"() {}" return code def gen_func_initialize(self): code = "Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {\n" + \ "// Determine grid shape\n" + \ "ThreadblockSwizzle threadblock_swizzle;\n" + \ "cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(\n" + \ " args.problem_size_0, \n" + \ " { ThreadblockShape0::kM, ThreadblockShape0::kN, ThreadblockShape0::kK },\n" + \ " args.batch_count);\n" + \ "// Initialize the Params structure\n" + \ "params_ = typename B2bGemmKernel::Params{\n" for i in range(self.b2b_num): code += helper.var_idx(" args.problem_size_", i) + ",\n" code += " grid_shape,\n" + \ " args.ref_A0.non_const_ref(),\n" for i in range(self.b2b_num): code += helper.var_idx(" args.ref_B", i) + ".non_const_ref(),\n" code += helper.var_idx(" args.ref_C", i) + ".non_const_ref(),\n" code += helper.var_idx(" args.ref_D", self.b2b_num - 1) + ",\n" for i in range(self.b2b_num): code += helper.var_idx(" args.epilogue", i) + ",\n" code += " args.batch_count\n" code += "};\n" + \ "return Status::kSuccess;\n" + \ "}\n" return code def gen_func_run(self): code = "Status run(cudaStream_t stream = nullptr) {\n" + \ "\n" + \ " ThreadblockSwizzle threadblock_swizzle;\n" + \ "\n" + \ " dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);\n" + \ " dim3 block(B2bGemmKernel::kThreadCount, 1, 1);\n" + \ "\n" + \ " cudaError_t result;\n" + \ "\n" + \ " int smem_size = int(sizeof(typename B2bGemmKernel::SharedStorage));\n" + \ " if (smem_size >= (48 << 10)) {\n" + \ " result = cudaFuncSetAttribute(Kernel<B2bGemmKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);\n" + \ "\n" + \ " if (result != 
cudaSuccess) {\n" + \ " return Status::kErrorInternal;\n" + \ " }\n" + \ " }\n" + \ " cutlass::Kernel<B2bGemmKernel><<<grid, block, smem_size, stream>>>(params_);\n" + \ " result = cudaGetLastError();\n" + \ " return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;\n" + \ " }\n" return code def gen_func_operator(self): opeartor_with_arg_code = "Status operator()(\n" + \ " Arguments const &args,\n" + \ " void *workspace = nullptr,\n" + \ " cudaStream_t stream = nullptr) {\n" + \ " Status status = initialize(args, workspace);\n" + \ " \n" + \ " if (status == Status::kSuccess) {\n" + \ " status = run(stream);\n" + \ " }\n" + \ " return status;\n" + \ "}\n" operator_code = "Status operator()(\n" + \ " cudaStream_t stream = nullptr) {\n" + \ " Status status = run(stream);\n" + \ " return status;\n" + \ "}\n" return opeartor_with_arg_code + "\n" + operator_code def gen_all_func(self): return self.gen_using_kernel() + "\n" + \ self.gen_args() + "\n" + \ self.gen_func_constructs() + "\n" + \ self.gen_func_initialize() + "\n" + \ self.gen_func_run() + "\n" + \ self.gen_func_operator()
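# Hedged illustrative addition (not part of the original generator): a hypothetical
# end-to-end driver for gen_device, sketching a two-layer fused GEMM. The per-layer
# keys ('mnk', 'A_tp', 'A_format', 'B_tp', 'B_format', 'C_tp', 'C_format', 'Acc_tp')
# are the ones read by update_b2b_args() above, but the concrete type/format strings,
# the epilogue field consumed by helper.get_epilogue_tp(), and the existence of the
# output directory layout all depend on helper.py and the surrounding project, so
# treat this as a sketch only, not a supported entry point.
if __name__ == '__main__':
    example_fuse_gemm_info = [
        {'mnk': [1024, 64, 576], 'A_tp': 'fp16', 'A_format': 'row', 'B_tp': 'fp16',
         'B_format': 'col', 'C_tp': 'fp16', 'C_format': 'row', 'Acc_tp': 'fp16'},
        {'mnk': [1024, 128, 64], 'A_tp': 'fp16', 'A_format': 'row', 'B_tp': 'fp16',
         'B_format': 'col', 'C_tp': 'fp16', 'C_format': 'row', 'Acc_tp': 'fp16'},
    ]
    gen = gen_device(example_fuse_gemm_info, 'B2bFusedGemm', user_header_file=[],
                     cutlass_deps_root='../../../include/', project_root='./',
                     output_dir='./generated')
    gen.gen_code(80, 'hmma1688')  # SM80 with 16x8x8 HMMA, the path handled by set_arch()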
examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_device.py/0
{ "file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_device.py", "repo_id": "examples", "token_count": 9941 }
9
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <iostream> #include <fstream> #include <sstream> #include <type_traits> #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/distribution.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/device/tensor_relu.h" #include "cutlass/platform/platform.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/device/gemm_universal.h" #include "dual_gemm_common.h" #include "helper.h" #define CHECK_GT(val1, val2) \ if((val1) <= (val2)) \ std::cerr << __FILE__ << " " << __LINE__ << ": CHECK_GT failed\n"; #define CHECK_TRUE(val) \ if(!(val)) \ std::cerr << __FILE__ << " " << __LINE__ << ": CHECK_TRUE failed\n"; template < typename OutputOp, typename Element, typename Layout> struct TensorEpilogueForEachFunc { /// View type using TensorView = cutlass::TensorView<Element, Layout>; /// Coordinate in tensor's index space using TensorCoord = typename TensorView::TensorCoord; /// Parameters structure struct Params { // // Data members // TensorView view_x0; TensorView view_x1; TensorView view_y; OutputOp output_op; // // Methods // Params( TensorView view_x0_ = TensorView(), TensorView view_x1_ = TensorView(), TensorView view_y_ = TensorView(), OutputOp output_op_ = OutputOp(typename OutputOp::Params{}) ): view_x0(view_x0_), view_x1(view_x1_), view_y(view_y_), output_op(output_op_) { } }; Params params; CUTLASS_DEVICE TensorEpilogueForEachFunc(Params const &params): params(params) { } CUTLASS_DEVICE void operator()(TensorCoord 
const &coord) { Element const & x0 = params.view_x0.at(coord); Element const & x1 = params.view_x1.at(coord); Element& y = params.view_y.at(coord); y = params.output_op(x0, x1); } }; template < typename OutputOp, typename Element, typename Layout> void TensorEpilogueForEach( cutlass::TensorView<Element, Layout> x0, cutlass::TensorView<Element, Layout> x1, cutlass::TensorView<Element, Layout> y) { using Func = TensorEpilogueForEachFunc<OutputOp, Element, Layout>; using Params = typename Func::Params; cutlass::reference::device::TensorForEach<Func, Layout::kRank, Params>( y.extent(), Params(x0, x1, y) ); } //////////////////////////////////////////////////////////////////////////////// template <typename Gemm0_, typename Gemm1_> struct NonFusedDualGemmRun { using Gemm0 = Gemm0_; using Gemm1 = Gemm1_; using ElementAccumulator = typename Gemm0::ElementAccumulator; using ElementCompute = typename Gemm0::GemmKernel::Epilogue::OutputOp::ElementCompute; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; cutlass::Distribution::Kind init_Bias; uint64_t seed; // // Methods // NonFusedDualGemmRun( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_Bias_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), init_Bias(init_Bias_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { cutlass::reference::host::TensorFillRandomUniform( view, seed, 2, -2, 0); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential( view.data(), view.capacity()); } else if (dist_kind == cutlass::Distribution::AllZeros) { cutlass::reference::host::TensorFill(view, Element(0)); } else if (dist_kind == cutlass::Distribution::AllOnes) { cutlass::reference::host::TensorFill(view, Element(1)); } else { std::cerr << "Not implemented\n"; return false; } return true; } /// Executes one test bool run( cutlass::gemm::GemmCoord problem_size, ElementCompute alpha0 = ElementCompute(1), ElementCompute beta0 = ElementCompute(0), ElementCompute alpha1 = ElementCompute(1), ElementCompute beta1 = ElementCompute(0), bool is_profiling = true, bool relu = false, int warm_ups = 1, int runs = 100) { // // Allocate the GEMM workspace // cutlass::HostTensor< typename Gemm0::ElementA, typename Gemm0::LayoutA> tensor_A0(problem_size.mk()); cutlass::HostTensor< typename Gemm0::ElementB, typename Gemm0::LayoutB> tensor_B0(problem_size.kn()); cutlass::HostTensor< typename Gemm0::ElementC, typename Gemm0::LayoutC> tensor_C0(problem_size.mn()); cutlass::HostTensor< typename Gemm1::ElementC, typename Gemm0::LayoutC> tensor_Bias0({1, problem_size.n()}); cutlass::HostTensor< typename Gemm0::ElementC, typename Gemm0::LayoutC> tensor_D0(problem_size.mn()); cutlass::HostTensor< typename Gemm0::ElementC, typename Gemm0::LayoutC> 
reference_D0(problem_size.mn()); cutlass::HostTensor< typename Gemm1::ElementB, typename Gemm1::LayoutB> tensor_B1(problem_size.kn()); cutlass::HostTensor< typename Gemm1::ElementC, typename Gemm1::LayoutC> tensor_C1(problem_size.mn()); cutlass::HostTensor< typename Gemm1::ElementC, typename Gemm1::LayoutC> tensor_Bias1({1, problem_size.n()}); cutlass::HostTensor< typename Gemm1::ElementC, typename Gemm1::LayoutC> tensor_D1(problem_size.mn()); cutlass::HostTensor< typename Gemm1::ElementC, typename Gemm1::LayoutC> reference_D1(problem_size.mn()); CHECK_TRUE(initialize_tensor(tensor_A0.host_view(), init_A, seed + 2019)); CHECK_TRUE(initialize_tensor(tensor_B0.host_view(), init_B, seed + 2018)); CHECK_TRUE(initialize_tensor(tensor_C0.host_view(), init_C, seed + 2017)); CHECK_TRUE(initialize_tensor(tensor_Bias0.host_view(), init_Bias, seed + 2014)); CHECK_TRUE(initialize_tensor(tensor_B1.host_view(), init_B, seed + 2016)); CHECK_TRUE(initialize_tensor(tensor_C1.host_view(), init_C, seed + 2015)); CHECK_TRUE(initialize_tensor(tensor_Bias1.host_view(), init_Bias, seed + 2013)); cutlass::reference::host::TensorFill( tensor_D0.host_view()); cutlass::reference::host::TensorFill( tensor_D1.host_view()); cutlass::reference::host::TensorFill( reference_D0.host_view()); cutlass::reference::host::TensorFill( reference_D1.host_view()); tensor_A0.sync_device(); tensor_B0.sync_device(); tensor_C0.sync_device(); tensor_Bias0.sync_device(); tensor_D0.sync_device(); reference_D0.sync_device(); tensor_B1.sync_device(); tensor_C1.sync_device(); tensor_Bias1.sync_device(); tensor_D1.sync_device(); reference_D1.sync_device(); // // Initialize the GEMM operator // int split_k_slices = Gemm0::kSplitKSerial ? 2 : 1; typename Gemm0::Arguments arguments_0{ problem_size, tensor_A0.device_ref(), tensor_B0.device_ref(), {tensor_Bias0.device_data(), typename Gemm0::LayoutC::Stride(0)}, tensor_D0.device_ref(), {alpha0, beta0}, split_k_slices }; split_k_slices = Gemm1::kSplitKSerial ? 
2 : 1; typename Gemm1::Arguments arguments_1{ problem_size, tensor_A0.device_ref(), tensor_B1.device_ref(), {tensor_Bias1.device_data(), typename Gemm1::LayoutC::Stride(0)}, tensor_D1.device_ref(), {alpha1, beta1}, split_k_slices }; Gemm0 gemm_op_0; Gemm1 gemm_op_1; // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace0(gemm_op_0.get_workspace_size(arguments_0)); cutlass::device_memory::allocation<uint8_t> workspace1(gemm_op_1.get_workspace_size(arguments_1)); cutlass::Status status = gemm_op_0.initialize(arguments_0, workspace0.get()); CUTLASS_CHECK(status); status = gemm_op_1.initialize(arguments_1, workspace1.get()); CUTLASS_CHECK(status); for(int i = 0; i < warm_ups; i++) { status = gemm_op_0(); CUTLASS_CHECK(status); status = gemm_op_1(); CUTLASS_CHECK(status); } if (is_profiling) { // // Profile the GEMM // cudaEvent_t start, stop1, stop2; cudaEventCreate(&start); cudaEventCreate(&stop1); cudaEventCreate(&stop2); cudaEventRecord(start); for(int i = 0; i < runs; i++) { status = gemm_op_0(); CUTLASS_CHECK(status); } cudaEventRecord(stop1); for(int i = 0; i < runs; i++) { status = gemm_op_1(); CUTLASS_CHECK(status); } cudaEventRecord(stop2); cudaDeviceSynchronize(); float gemm0Time, gemm1Time, totalTime; cudaEventElapsedTime(&gemm0Time, start, stop1); cudaEventElapsedTime(&gemm1Time, stop1, stop2); cudaEventElapsedTime(&totalTime, start, stop2); std::cout << "gemm 0 time " << gemm0Time / (float)runs << " ms\n"; std::cout << "gemm 1 time " << gemm1Time / (float)runs << " ms\n"; std::cout << "Non-fusion GEMM only time " << totalTime / (float)runs << " ms\n"; } tensor_D0.sync_host(); tensor_D1.sync_host(); // // Verify // cutlass::reference::device::Gemm< typename Gemm0::ElementA, typename Gemm0::LayoutA, typename Gemm0::ElementB, typename Gemm0::LayoutB, typename Gemm0::ElementC, typename Gemm0::LayoutC, ElementCompute, ElementAccumulator, typename Gemm0::Operator> reference_gemm_0; cutlass::reference::device::Gemm< typename Gemm1::ElementA, typename Gemm1::LayoutA, typename Gemm1::ElementB, typename Gemm1::LayoutB, typename Gemm1::ElementC, typename Gemm1::LayoutC, ElementCompute, ElementAccumulator, typename Gemm1::Operator> reference_gemm_1; reference_gemm_0( problem_size, alpha0, tensor_A0.device_ref(), tensor_B0.device_ref(), beta0, {tensor_Bias0.device_data(), typename Gemm0::LayoutC::Stride(0)}, reference_D0.device_ref() ); if(relu) { cutlass::reference::device::TensorReLu(reference_D0.device_view()); } reference_gemm_1( problem_size, alpha1, tensor_A0.device_ref(), tensor_B1.device_ref(), beta1, {tensor_Bias1.device_data(), typename Gemm1::LayoutC::Stride(0)}, reference_D1.device_ref() ); if(relu) { cutlass::reference::device::TensorReLu(reference_D1.device_view()); } // Wait for kernels to finish cudaDeviceSynchronize(); reference_D0.sync_host(); reference_D1.sync_host(); CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D0.host_view()), 0); CHECK_GT(cutlass::reference::host::TensorNorm(reference_D0.host_view()), 0); CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D1.host_view()), 0); CHECK_GT(cutlass::reference::host::TensorNorm(reference_D1.host_view()), 0); bool passed0 = cutlass::reference::host::TensorEquals( reference_D1.host_view(), tensor_D1.host_view()); CHECK_TRUE(passed0); bool passed1 = cutlass::reference::host::TensorEquals( reference_D1.host_view(), tensor_D1.host_view()); CHECK_TRUE(passed1); if (!passed0 || !passed1) { std::stringstream fname; fname << "error_DualGemm_device_nonfused.txt"; std::cerr << "Dumping results in " << 
fname.str() << "\n"; std::ofstream file(fname.str()); file << "A0 =\n" << tensor_A0.host_view() << "\nB0 =\n" << tensor_B0.host_view() << "\nC0 =\n" << tensor_C0.host_view() << "\nBias0:\n" << tensor_Bias0.host_view() << "\n" << "\nD0 =\n" << tensor_D0.host_view() << "\nB1 =\n" << tensor_B1.host_view() << "\nC1 =\n" << tensor_C1.host_view() << "\nBias1:\n" << tensor_Bias1.host_view() << "\n" << "\n\nReference =\n" << reference_D1.host_view() << "\nComputed =\n" << tensor_D1.host_view(); } return passed0 && passed1; } }; template <typename DualGemm_> struct DualFusedGemmRun { using DualGemm = DualGemm_; using ElementAccumulator = typename DualGemm::ElementAccumulator; using ElementCompute = typename DualGemm::DualGemmKernel::Epilogue0::OutputOp::ElementCompute; using EpilogueOutputOp2 = typename DualGemm::EpilogueOutputOp2; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; cutlass::Distribution::Kind init_Scale; cutlass::Distribution::Kind init_Bias; uint64_t seed; // // Methods // DualFusedGemmRun( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_Scale_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_Bias_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), init_Scale(init_Scale_), init_Bias(init_Bias_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { cutlass::reference::host::TensorFillRandomUniform( view, seed, 2, -2, 0); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential( view.data(), view.capacity()); } else if (dist_kind == cutlass::Distribution::AllZeros) { cutlass::reference::host::TensorFill(view, Element(0)); } else if (dist_kind == cutlass::Distribution::AllOnes) { cutlass::reference::host::TensorFill(view, Element(1)); } else { std::cerr << "Not implemented\n"; return false; } return true; } /// Executes one test bool run( cutlass::gemm::GemmCoord problem_size, ElementCompute alpha0 = ElementCompute(1), ElementCompute beta0 = ElementCompute(1), ElementCompute alpha1 = ElementCompute(1), ElementCompute beta1 = ElementCompute(1), int batch_count = 1, bool broadcast_b1 = false, bool is_profiling = true, bool relu = false, int warm_ups = 1, int runs = 100) { // // Allocate the GEMM workspace // cutlass::HostTensor< typename DualGemm::ElementA, typename DualGemm::LayoutA> tensor_A0( cutlass::platform::is_same<typename DualGemm::LayoutA, cutlass::layout::RowMajor>::value ? cutlass::MatrixCoord(batch_count * problem_size.m(), problem_size.k()) : cutlass::MatrixCoord(problem_size.m(), batch_count * problem_size.k())); cutlass::HostTensor< typename DualGemm::ElementB, typename DualGemm::LayoutB0> tensor_B0( cutlass::platform::is_same<typename DualGemm::LayoutB0, cutlass::layout::RowMajor>::value ? 
cutlass::MatrixCoord(batch_count * problem_size.k(), problem_size.n()) : cutlass::MatrixCoord(problem_size.k(), batch_count * problem_size.n())); cutlass::HostTensor< typename DualGemm::ElementC, typename DualGemm::LayoutC> tensor_C0( cutlass::platform::is_same<typename DualGemm::LayoutC, cutlass::layout::RowMajor>::value ? cutlass::MatrixCoord(batch_count * problem_size.m(), problem_size.n()) : cutlass::MatrixCoord(problem_size.m(), batch_count * problem_size.n())); cutlass::HostTensor< typename DualGemm::ElementC, typename DualGemm::LayoutScaleBias> tensor_Bias0({batch_count, problem_size.n()}); cutlass::HostTensor< typename DualGemm::ElementC, typename DualGemm::LayoutC> tensor_D0( cutlass::platform::is_same<typename DualGemm::LayoutC, cutlass::layout::RowMajor>::value ? cutlass::MatrixCoord(batch_count * problem_size.m(), problem_size.n()) : cutlass::MatrixCoord(problem_size.m(), batch_count * problem_size.n())); cutlass::HostTensor< typename DualGemm::ElementC, typename DualGemm::LayoutC> reference_D0( cutlass::platform::is_same<typename DualGemm::LayoutC, cutlass::layout::RowMajor>::value ? cutlass::MatrixCoord(batch_count * problem_size.m(), problem_size.n()) : cutlass::MatrixCoord(problem_size.m(), batch_count * problem_size.n())); cutlass::HostTensor< typename DualGemm::ElementB, typename DualGemm::LayoutB1> tensor_B1( cutlass::platform::is_same<typename DualGemm::LayoutB1, cutlass::layout::RowMajor>::value ? cutlass::MatrixCoord(batch_count * problem_size.k(), problem_size.n()) : cutlass::MatrixCoord(problem_size.k(), batch_count * problem_size.n())); if (broadcast_b1) { tensor_B1.resize({problem_size.k(), batch_count}); } cutlass::HostTensor< typename DualGemm::ElementC, typename DualGemm::LayoutC> tensor_C1( cutlass::platform::is_same<typename DualGemm::LayoutC, cutlass::layout::RowMajor>::value ? cutlass::MatrixCoord(batch_count * problem_size.m(), problem_size.n()) : cutlass::MatrixCoord(problem_size.m(), batch_count * problem_size.n())); cutlass::HostTensor< typename DualGemm::ElementC, typename DualGemm::LayoutScaleBias> tensor_Bias1({batch_count, problem_size.n()}); cutlass::HostTensor< typename DualGemm::ElementC, typename DualGemm::LayoutC> tensor_D1( cutlass::platform::is_same<typename DualGemm::LayoutC, cutlass::layout::RowMajor>::value ? cutlass::MatrixCoord(batch_count * problem_size.m(), problem_size.n()) : cutlass::MatrixCoord(problem_size.m(), batch_count * problem_size.n())); cutlass::HostTensor< typename DualGemm::ElementC, typename DualGemm::LayoutC> tensor_D2( cutlass::platform::is_same<typename DualGemm::LayoutC, cutlass::layout::RowMajor>::value ? cutlass::MatrixCoord(batch_count * problem_size.m(), problem_size.n()) : cutlass::MatrixCoord(problem_size.m(), batch_count * problem_size.n())); cutlass::HostTensor< typename DualGemm::ElementC, typename DualGemm::LayoutC> reference_D1( cutlass::platform::is_same<typename DualGemm::LayoutC, cutlass::layout::RowMajor>::value ? cutlass::MatrixCoord(batch_count * problem_size.m(), problem_size.n()) : cutlass::MatrixCoord(problem_size.m(), batch_count * problem_size.n())); cutlass::HostTensor< typename DualGemm::ElementC, typename DualGemm::LayoutC> reference_D2( cutlass::platform::is_same<typename DualGemm::LayoutC, cutlass::layout::RowMajor>::value ? 
cutlass::MatrixCoord(batch_count * problem_size.m(), problem_size.n()) : cutlass::MatrixCoord(problem_size.m(), batch_count * problem_size.n())); CHECK_TRUE(initialize_tensor(tensor_A0.host_view(), init_A, seed + 2019)); CHECK_TRUE(initialize_tensor(tensor_B0.host_view(), init_B, seed + 2118)); CHECK_TRUE(initialize_tensor(tensor_C0.host_view(), init_C, seed + 2017)); CHECK_TRUE(initialize_tensor(tensor_Bias0.host_view(), init_Bias, seed + 2011)); CHECK_TRUE(initialize_tensor(tensor_B1.host_view(), init_B, seed + 2113)); CHECK_TRUE(initialize_tensor(tensor_C1.host_view(), init_C, seed + 2015)); CHECK_TRUE(initialize_tensor(tensor_Bias1.host_view(), init_Bias, seed + 2012)); cutlass::reference::host::TensorFill( tensor_D0.host_view()); cutlass::reference::host::TensorFill( tensor_D1.host_view()); cutlass::reference::host::TensorFill( tensor_D2.host_view()); cutlass::reference::host::TensorFill( reference_D0.host_view()); cutlass::reference::host::TensorFill( reference_D1.host_view()); cutlass::reference::host::TensorFill( reference_D2.host_view()); tensor_A0.sync_device(); tensor_B0.sync_device(); tensor_C0.sync_device(); tensor_Bias0.sync_device(); tensor_B1.sync_device(); tensor_C1.sync_device(); tensor_Bias1.sync_device(); tensor_D0.sync_device(); tensor_D1.sync_device(); tensor_D2.sync_device(); reference_D0.sync_device(); reference_D1.sync_device(); reference_D2.sync_device(); // // Batch strides (irrelevant when batch_count == 1) // int64_t batch_stride_A = problem_size.m() * problem_size.k(); int64_t batch_stride_B0 = problem_size.k() * problem_size.n(); int64_t batch_stride_B1 = problem_size.k() * problem_size.n(); if (broadcast_b1) { // B1 is a (column) vector batch_stride_B1 = problem_size.k(); } int64_t batch_stride_Bias = problem_size.n(); int64_t batch_stride_D = problem_size.m() * problem_size.n(); // // Initialize the GEMM operator // int split_k_slices = DualGemm::kSplitKSerial ? 2 : 1; typename cutlass::TensorRef<typename DualGemm::ElementC, typename DualGemm::LayoutC> nullptr_ref{}; decltype(nullptr_ref) ref_B0, ref_B1; if (beta0 != ElementCompute(0)) { ref_B0 = {tensor_Bias0.device_data(), typename DualGemm::LayoutC::Stride(0)}; } if (beta1 != ElementCompute(0)) { ref_B1 = {tensor_Bias1.device_data(), typename DualGemm::LayoutC::Stride(0)}; } typename DualGemm::Arguments arguments{ (batch_count > 1 ? cutlass::gemm::DualGemmMode::kBatched : cutlass::gemm::DualGemmMode::kGemm), problem_size, tensor_A0.device_ref(), tensor_B0.device_ref(), ref_B0, DualGemm::kStoreD0 ? tensor_D0.device_ref() : nullptr_ref, (broadcast_b1 ? typename DualGemm::TensorRefB1(tensor_B1.device_data(), 0) : tensor_B1.device_ref()), ref_B1, DualGemm::kStoreD1 ? 
tensor_D1.device_ref() : nullptr_ref, tensor_D2.device_ref(), {alpha0, beta0}, {alpha1, beta1}, {}, split_k_slices, batch_count, batch_stride_A, batch_stride_B0, batch_stride_B1, batch_stride_Bias, batch_stride_D, }; // // Run the GEMM // DualGemm b2b_gemm_op; cutlass::device_memory::allocation<uint8_t> workspace(b2b_gemm_op.get_workspace_size(arguments)); cutlass::Status status = b2b_gemm_op.can_implement(arguments); CUTLASS_CHECK(status); status = b2b_gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); for(int i = 0; i < warm_ups; i++) { status = b2b_gemm_op(); CUTLASS_CHECK(status); } if (is_profiling) { // // Profile the GEMM // cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); for(int i = 0; i < runs; i++) { status = b2b_gemm_op(); CUTLASS_CHECK(status); } cudaEventRecord(stop); cudaDeviceSynchronize(); float gemmTime; cudaEventElapsedTime(&gemmTime, start, stop); std::cout << "Fusion time " << gemmTime / (float)runs << " ms\n"; } tensor_D0.sync_host(); tensor_D1.sync_host(); tensor_D2.sync_host(); // // Verify // using GemmUniversal0 = cutlass::gemm::device::GemmUniversal< typename DualGemm::ElementA, typename DualGemm::LayoutA, typename DualGemm::ElementB, typename DualGemm::LayoutB0, typename DualGemm::ElementC, typename DualGemm::LayoutC, ElementAccumulator >; GemmUniversal0 reference_gemm0; typename GemmUniversal0::Arguments args0 { (batch_count > 1 ? cutlass::gemm::GemmUniversalMode::kBatched : cutlass::gemm::GemmUniversalMode::kGemm), problem_size, batch_count, {alpha0, beta0}, tensor_A0.device_data(), tensor_B0.device_data(), tensor_Bias0.device_data(), reference_D0.device_data(), batch_stride_A, batch_stride_B0, batch_stride_Bias, batch_stride_D, tensor_A0.stride(0), tensor_B0.stride(0), 0, // zero stride for the bias vector reference_D0.stride(0), }; status = reference_gemm0.can_implement(args0); CUTLASS_CHECK(status); status = reference_gemm0(args0); CUTLASS_CHECK(status); using GemmUniversal1 = cutlass::gemm::device::GemmUniversal< typename DualGemm::ElementA, typename DualGemm::LayoutA, typename DualGemm::ElementB, typename DualGemm::LayoutB1, typename DualGemm::ElementC, typename DualGemm::LayoutC, ElementAccumulator >; GemmUniversal1 reference_gemm1; typename GemmUniversal1::Arguments args1 { (batch_count > 1 ? cutlass::gemm::GemmUniversalMode::kBatched : cutlass::gemm::GemmUniversalMode::kGemm), problem_size, batch_count, {alpha1, beta1}, tensor_A0.device_data(), tensor_B1.device_data(), tensor_Bias1.device_data(), reference_D1.device_data(), batch_stride_A, batch_stride_B1, batch_stride_Bias, batch_stride_D, tensor_A0.stride(0), (broadcast_b1 ? 
0 : tensor_B1.stride(0)), 0, // zero stride for the bias vector reference_D1.stride(0), }; status = reference_gemm1.can_implement(args1); CUTLASS_CHECK(status); status = reference_gemm1(args1); CUTLASS_CHECK(status); if(relu) { cutlass::reference::device::TensorReLu(reference_D0.device_view()); cutlass::reference::device::TensorReLu(reference_D1.device_view()); } TensorEpilogueForEach<EpilogueOutputOp2>(reference_D0.device_view(), reference_D1.device_view(), reference_D2.device_view()); cudaDeviceSynchronize(); reference_D0.sync_host(); reference_D1.sync_host(); reference_D2.sync_host(); CHECK_GT(cutlass::reference::host::TensorNorm(reference_D0.host_view()), 0); CHECK_GT(cutlass::reference::host::TensorNorm(reference_D1.host_view()), 0); CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D2.host_view()), 0); CHECK_GT(cutlass::reference::host::TensorNorm(reference_D2.host_view()), 0); bool passed_out0 = true; if (DualGemm::kStoreD0) { CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D0.host_view()), 0); passed_out0 = cutlass::reference::host::TensorEquals( reference_D0.host_view(), tensor_D0.host_view()); } CHECK_TRUE(passed_out0); bool passed_out1 = true; if (DualGemm::kStoreD1) { CHECK_GT(cutlass::reference::host::TensorNorm(tensor_D1.host_view()), 0); passed_out1 = cutlass::reference::host::TensorEquals( reference_D1.host_view(), tensor_D1.host_view()); } CHECK_TRUE(passed_out1); bool passed_out2 = cutlass::reference::host::TensorEquals( reference_D2.host_view(), tensor_D2.host_view()); CHECK_TRUE(passed_out2); bool passed = passed_out0 && passed_out1 && passed_out2; if (!passed) { std::stringstream fname; fname << "error_DualGemm_device_fused.txt"; std::cerr << "Dumping results in " << fname.str() << "\n"; std::ofstream file(fname.str()); file << "A0 =\n" << tensor_A0.host_view() << "\nB0 =\n" << tensor_B0.host_view() << "\nC0 =\n" << tensor_C0.host_view() << "\nBias0:\n" << tensor_Bias0.host_view() << "\n" << "\nB1 =\n" << tensor_B1.host_view() << "\nC1 =\n" << tensor_C1.host_view() << "\nBias1:\n" << tensor_Bias1.host_view() << "\n" << "\n\nReference0 =\n" << reference_D0.host_view() << "\nComputed0 =\n" << tensor_D0.host_view() << "\n\nReference1 =\n" << reference_D1.host_view() << "\nComputed1 =\n" << tensor_D1.host_view() << "\n\nReference2 =\n" << reference_D2.host_view() << "\nComputed2 =\n" << tensor_D2.host_view(); } //std::cout << "A0 " << tensor_A0.host_view() << std::endl; // std::cout << "reference_D0 " << reference_D0.host_view() << std::endl; // std::cout << "reference_D1 " << reference_D1.host_view() << std::endl; // std::cout << "reference_D2 " << reference_D2.host_view() << std::endl; //std::cout << "reference_D0 " << reference_D0.host_view() << std::endl; return passed; } }; ////////////////////////////////////////////////////////////////////////////////
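// Usage sketch: drives the fused runner above for one problem size with the default
// scalars. `DualGemm` here stands for a fully specified dual-GEMM device-level type
// defined elsewhere in this example; the extents below are illustrative only.
template <typename DualGemm>
bool run_single_dual_fused_test() {
  DualFusedGemmRun<DualGemm> runner;
  // M, N, K of the two GEMMs (they share the A operand and the N extent in this harness).
  cutlass::gemm::GemmCoord problem_size(1024, 512, 256);
  // Relies on the default alpha/beta of 1 and batch_count of 1 declared above.
  return runner.run(problem_size);
}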
examples/45_dual_gemm/dual_gemm_run.h/0
{ "file_path": "examples/45_dual_gemm/dual_gemm_run.h", "repo_id": "examples", "token_count": 13405 }
10
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Hopper GEMM example to create a GEMM kernel with custom Collectives The following example shows how to assemble a custom GEMM kernel that spells out the Collectives directly instead of using a builder and, in the process, instantiate a more efficient Epilogue (from `cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp`) instead of using the default epilogue. The GemmUniversal API takes 3 main template arguments: (1) the problem shape / extents (2) the collective mainloop type (3) the collective epilogue type While the collective mainloop can be stamped out using a CollectiveBuilder interface, it is possible to build a custom collective mainloop directly as well. Furthermore, since epilogues do not yet have a builder interface, this example shows how to instantiate a more-efficient epilogue alongside the collective mainloop. Note: there are several ways to implement the GEMM epilogue in Hopper - each with its own set of trade-offs. So it is recommended that users look at the options available under cutlass/epilogue/collective and evaluate for their particular scenario. Please refer to examples 48 and 49 to learn more about kernel schedules, and to the other CuTe examples present in `test/unit/cute` to familiarize yourself with the basics of CuTe. 
Examples: $ ./examples/50_hopper_gemm_with_epilogue_swizzle/50_hopper_gemm_with_epilogue_swizzle */ #include <iostream> #include "cutlass/cutlass.h" #include "cute/tensor.hpp" #include "cutlass/util/command_line.h" #include "cutlass/tensor_ref.h" #include "cutlass/epilogue/collective/collective_epilogue.hpp" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/gemm/collective/collective_builder.hpp" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/gemm_universal.hpp" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/util/command_line.h" #include "cutlass/util/distribution.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/packed_stride.hpp" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm_complex.h" #include "cutlass/util/reference/device/tensor_compare.h" #include "cutlass/util/reference/device/tensor_fill.h" using namespace cute; /////////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; bool error; int m, n, k, l; int alpha, beta; Options(): help(false), error(false), m(2048), n(2048), k(2048), l(1), alpha(1), beta(0) { } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; return; } cmd.get_cmd_line_argument("m", m, 2048); cmd.get_cmd_line_argument("n", n, 2048); cmd.get_cmd_line_argument("k", k, 2048); cmd.get_cmd_line_argument("l", l, 1); cmd.get_cmd_line_argument("alpha", alpha, 1); cmd.get_cmd_line_argument("beta", beta, 0); } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "50_hopper_gemm_with_epilogue_swizzle\n\n" << "Hopper GEMM Example with Epilogue Swizzle.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement\n\n" << " --m=<int> Sets the M extent of the GEMM\n" << " --n=<int> Sets the N extent of the GEMM\n" << " --k=<int> Sets the K extent of the GEMM\n" << " --l=<int> Sets the L extent (batch count) of the GEMM\n" << " --alpha=<s32> Epilogue scalar alpha\n" << " --beta=<s32> Epilogue scalar beta\n\n"; return out; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// /// Helper to initialize a block of device data template <class Element> bool initialize_block( cutlass::DeviceAllocation<Element>& block, uint64_t seed=2023) { Element scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else { scope_max = 8; scope_min = -8; } cutlass::reference::device::BlockFillRandomUniform( block.get(), block.size(), seed, scope_max, scope_min, 0); return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) // Wrapper to run and verify a GEMM. 
template < class Gemm > struct ExampleRunner { using StrideA = typename Gemm::GemmKernel::StrideA; using StrideB = typename Gemm::GemmKernel::StrideB; using StrideC = typename Gemm::GemmKernel::StrideC; using StrideD = typename Gemm::GemmKernel::StrideD; using LayoutA = typename Gemm::LayoutA; using LayoutB = typename Gemm::LayoutB; using LayoutC = typename Gemm::LayoutC; using LayoutD = typename Gemm::LayoutD; using ElementA = typename Gemm::ElementA; using ElementB = typename Gemm::ElementB; using ElementAcc = typename Gemm::ElementAccumulator; using CollectiveEpilogue = typename Gemm::CollectiveEpilogue; using ElementC = typename Gemm::ElementC; using ElementOutput = typename CollectiveEpilogue::ElementOutput; using ElementCompute = typename CollectiveEpilogue::ElementCompute; using ElementAccumulator = typename CollectiveEpilogue::ElementAccumulator; using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape; // // Data members // /// Initialization StrideA stride_A; StrideB stride_B; StrideC stride_C; StrideD stride_D; uint64_t seed = 0; cutlass::DeviceAllocation<ElementA> block_A; cutlass::DeviceAllocation<ElementB> block_B; cutlass::DeviceAllocation<ElementC> block_C; cutlass::DeviceAllocation<ElementOutput> block_D; cutlass::DeviceAllocation<ElementOutput> block_ref_D; // // Methods // bool verify(const ProblemShapeType& problem_size, int32_t alpha, int32_t beta) { auto [M, N, K, L] = problem_size; cutlass::TensorRef ref_A(block_A.get(), LayoutA::packed({M, K})); cutlass::TensorRef ref_B(block_B.get(), LayoutB::packed({K, N})); cutlass::TensorRef ref_C(block_C.get(), LayoutC::packed({M, N})); cutlass::TensorRef ref_D(block_ref_D.get(), LayoutD::packed({M, N})); cutlass::reference::device::GemmComplex( {M, N, K}, ElementCompute(alpha), ref_A, cutlass::ComplexTransform::kNone, ref_B, cutlass::ComplexTransform::kNone, ElementCompute(beta), ref_C, ref_D, ElementAccumulator(0), L, // batch_count M * K, // batch_stride_A K * N, // batch_stride_B M * N, // batch_stride_C M * N // batch_stride_D ); cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cerr << "Reference kernel failed. 
Last CUDA error: " << cudaGetErrorString(result) << std::endl; return false; } // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::device::BlockCompareEqual(block_ref_D.get(), block_D.get(), block_D.size()); return passed; } /// Initialize operands to be used in the GEMM and reference GEMM void initialize(const ProblemShapeType& problem_size) { auto problem_shape_MNKL = cute::append<4>(problem_size, 1); auto [M, N, K, L] = problem_shape_MNKL; stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, L)); stride_B = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, L)); stride_C = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, L)); stride_D = cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, L)); block_A.reset(M * K * L); block_B.reset(K * N * L); block_C.reset(M * N * L); block_D.reset(M * N * L); block_ref_D.reset(M * N * L); initialize_block(block_A, seed + 2023); initialize_block(block_B, seed + 2022); initialize_block(block_C, seed + 2021); } bool run(const Options& options, const cutlass::KernelHardwareInfo& hw_info) { ProblemShapeType problem_size = ProblemShapeType{options.m, options.n, options.k, options.l}; initialize(problem_size); typename Gemm::GemmKernel::Arguments arguments{ cutlass::gemm::GemmUniversalMode::kGemm, problem_size, {block_A.get(), stride_A, block_B.get(), stride_B}, {{options.alpha, options.beta}, block_C.get(), stride_C, block_D.get(), stride_D}, hw_info }; Gemm gemm_op; size_t workspace_size = Gemm::get_workspace_size(arguments); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); cutlass::Status status = gemm_op.can_implement(arguments); if (status != cutlass::Status::kSuccess) { std::cerr << "This kernel is not supported. Last CUDA error is: " << cudaGetErrorString(cudaGetLastError()) << std::endl; return false; } status = gemm_op.initialize(arguments, workspace.get()); if (status != cutlass::Status::kSuccess) { std::cerr << "Failed to initialize the CUTLASS kernel. Last CUDA error is: " << cudaGetErrorString(cudaGetLastError()) << std::endl; return false; } // Run the GEMM status = gemm_op.run(); if (status != cutlass::Status::kSuccess) { std::cerr << "Failed to launch the CUTLASS kernel. Last CUDA error is: " << cudaGetErrorString(cudaGetLastError()) << std::endl; return false; } cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cerr << "Error running the CUTLASS kernel. 
Last CUDA error is: " << cudaGetErrorString(result) << std::endl; return false; } // Verify that the result is correct bool passed = verify(problem_size, options.alpha, options.beta); if (!passed) { std::cerr << "Reference check failed" << std::endl; } return passed; } }; #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) /////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (__CUDACC_VER_MAJOR__ < 12 || props.major < 9) { std::cout << "This example requires a GPU of NVIDIA's Hopper Architecture or " << "later (compute capability 90 or greater) and CUDA 12.0 or greater.\n"; return 0; } // // Parse options // Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.error) { std::cerr << "Aborting execution." << std::endl; return -1; } #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) // // Run examples // // The KernelHardwareInfo struct holds the number of SMs on the GPU with a given device ID. This // information is used by the underlying kernel. cutlass::KernelHardwareInfo hw_info; // Change device_id to another value if you are running on a machine with multiple GPUs and wish // to use a GPU other than that with device ID 0. hw_info.device_id = 0; hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id); bool passed; // Problem configuration using ElementA = int8_t; using ElementB = int8_t; using ElementAcc = int32_t; using ElementOutput = int8_t; // Note : Only TN WGMMA Gemm is supported currently in 3.0 using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using LayoutD = cutlass::layout::ColumnMajor; // Tiling configuration selection using TileShape = Shape<_128,_64,_128>; // Choosing a thread block cluster larger than 1 allows us to Multicast data across thread blocks using ClusterShape = Shape<_1,_2,_1>; // // Assembling the CollectiveMainloop type // // Pipeline Depth to be used i.e number of A, B buffers in shared memory constexpr int PipelineStages = 8; // Let's choose a Warp-Specialized Mainloop implemention which uses TMA // Note : This requires / assumes the tensors to be 16B aligned using DispatchPolicy = cutlass::gemm::MainloopSm90TmaGmmaWarpSpecialized<PipelineStages, ClusterShape, cutlass::gemm::KernelTmaWarpSpecialized>; // TN => K Major for both A & B static constexpr cute::GMMA::Major GmmaMajorA = cute::GMMA::Major::K; static constexpr cute::GMMA::Major GmmaMajorB = cute::GMMA::Major::K; // We use the SS op selector as both A, B operands are read directly from SMEM (for TN WGMMA) using TiledMma = decltype(cute::make_tiled_mma(cute::GMMA::ss_op_selector< ElementA, ElementB, ElementAcc, TileShape, GmmaMajorA, GmmaMajorB>())); // A loads can be optimized with multicast if cluster-n > 1 using GmemTiledCopyA = std::conditional< cute::size(shape<1>(ClusterShape{})) == 1, cute::SM90_TMA_LOAD, cute::SM90_TMA_LOAD_MULTICAST>::type; // B loads can be optimized with multicast if cluster-m > 1 using GmemTiledCopyB = std::conditional< cute::size(shape<0>(ClusterShape{})) == 1, cute::SM90_TMA_LOAD, cute::SM90_TMA_LOAD_MULTICAST>::type; using SmemLayoutAtomA = 
decltype(cutlass::gemm::collective::detail::ss_smem_selector< GmmaMajorA, ElementA, decltype(cute::get<0>(TileShape{})), decltype(cute::get<2>(TileShape{})) >()); using SmemLayoutAtomB = decltype(cutlass::gemm::collective::detail::ss_smem_selector< GmmaMajorB, ElementB, decltype(cute::get<1>(TileShape{})), decltype(cute::get<2>(TileShape{})) >()); using CollectiveMainloop = cutlass::gemm::collective::CollectiveMma< DispatchPolicy, TileShape, ElementA, cutlass::gemm::TagToStrideA_t<LayoutA>, ElementB, cutlass::gemm::TagToStrideB_t<LayoutB>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, void, // Does not need a SmemCopyAtom, since A is read directly from SMEM cute::identity, GmemTiledCopyB, SmemLayoutAtomB, void, // Does not need a SmemCopyAtom, since B is read directly from SMEM cute::identity >; // // Assembling the Collective Epilogue Type // // Break the 128 along TILE_M into chunks of 32, to get a 128B leading dimension using PreSwizzleLayout = Layout< Shape< Shape <_32,_4 >,_64>, Stride<Stride< _1,_2048>,_32>>; // 128 threads loading 16 elements each (to get vectorized global stores) using TileShapeS2R = Shape<_128,_16>; // Layout to ensure bank-conflict free loads & stores using SmemLayout = ComposedLayout< Swizzle<3,4,3>, smem_ptr_flag_bits<sizeof_bits<ElementAcc>::value>, PreSwizzleLayout>; // Tiled copy from Smem to Registers // Note : CuTe will vectorize this copy if the tiling + swizzling above were right using TiledCopyS2R = TiledCopy< Copy_Atom<DefaultCopy, ElementAcc>, Layout< Shape<_128,_16>, Stride<_16,_1>>, TileShapeS2R>; using Epilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< cutlass::epilogue::collective::Epilogue< cutlass::gemm::TagToStrideC_t<LayoutC>, cutlass::gemm::TagToStrideC_t<LayoutD>, cutlass::epilogue::thread::LinearCombination<int32_t, 1, int32_t, int32_t>, SmemLayout, Copy_Atom<DefaultCopy, ElementAcc>, TiledCopyS2R, Copy_Atom<DefaultCopy, ElementOutput>>>; // // Assembling the GemmKernel // using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, Epilogue >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; ExampleRunner<Gemm> runner; passed = runner.run(options, hw_info); std::cout << "WGMMA GEMM with Epilogue Swizzle : " << (passed ? "Passed" : "Failed") << std::endl; #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
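// Inspection sketch: the pre-swizzle epilogue layout chosen in main() can be examined
// on the host with CuTe's print utilities to sanity-check how TILE_M is broken into
// 32-element chunks (128B for int32 accumulators) before the swizzle is applied.
// The function name is illustrative and is not used elsewhere in this example.
inline void inspect_epilogue_preswizzle_layout() {
  // Same shape/stride as PreSwizzleLayout above: ((32,4),64) : ((1,2048),32)
  auto pre_swizzle = Layout<Shape <Shape <_32,   _4>, _64>,
                            Stride<Stride< _1,_2048>, _32>>{};
  print(pre_swizzle);
  print("\n");
}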
examples/50_hopper_gemm_with_epilogue_swizzle/50_hopper_gemm_with_epilogue_swizzle.cu/0
{ "file_path": "examples/50_hopper_gemm_with_epilogue_swizzle/50_hopper_gemm_with_epilogue_swizzle.cu", "repo_id": "examples", "token_count": 7056 }
11
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ // Command line options parsing struct Options { bool help = false; float alpha = 1.f, beta = 0.f; float scale_a = 1.f, scale_b = 1.f, scale_c = 1.f, scale_d = 1.f, scale_aux = 1.f; bool device_scale = false; bool save_aux = true; bool save_amax = true; int iterations = 1000; int m = 1024, n = 512, k = 1024, l = 1; // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; return; } cmd.get_cmd_line_argument("m", m); cmd.get_cmd_line_argument("n", n); cmd.get_cmd_line_argument("k", k); cmd.get_cmd_line_argument("l", l); cmd.get_cmd_line_argument("alpha", alpha, 1.f); cmd.get_cmd_line_argument("beta", beta, 0.f); cmd.get_cmd_line_argument("scale_a", scale_a, 1.f); cmd.get_cmd_line_argument("scale_b", scale_b, 1.f); cmd.get_cmd_line_argument("scale_c", scale_c, 1.f); cmd.get_cmd_line_argument("scale_d", scale_d, 1.f); cmd.get_cmd_line_argument("scale_aux", scale_aux, 1.f); cmd.get_cmd_line_argument("device_scale", device_scale, false); cmd.get_cmd_line_argument("save_aux", save_aux, true); cmd.get_cmd_line_argument("save_amax", save_amax, true); cmd.get_cmd_line_argument("iterations", iterations); } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "54_fp8_hopper_warp_specialized_gemm\n\n" << " Hopper FP8 GEMM using a Warp Specialized kernel.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement\n\n" << " --m=<int> Sets the M extent of the GEMM\n" << " --n=<int> Sets the N extent of the GEMM\n" << " --k=<int> Sets the K extent of the GEMM\n" << " --l=<int> Sets the l extent (batch) of the GEMM\n" << " --alpha=<f32> Epilogue scalar alpha\n" << " --beta=<f32> Epilogue scalar beta\n" << " --scale_a=<f32> Scaling factor for A\n" << " --scale_b=<f32> Scaling factor for B\n" << " --scale_c=<f32> Scaling factor for C\n" << " --scale_d=<f32> Scaling factor for D (ignored for non-fp8 D)\n" << " --scale_aux=<f32> Scaling factor for the auxiliary tensor (ignored for non-fp8 aux)\n" << " --device_scale=<bool> Copy scalars to device memory before kernel launch (default: false)\n" << " --save_aux=<bool> Save the pre-activation as an auxiliary tensor (default: true)\n" << " --save_amax=<bool> Save the pre-scaled max absolute value of any fp8 outputs (aux and/or D) (default: true)\n" << " --iterations=<int> Number of profiling iterations to perform.\n\n"; out << "\n\nExamples:\n\n" << "$ " << "54_fp8_hopper_warp_specialized_gemm" << " --m=1024 --n=512 --k=1024 --alpha=2 --beta=0.707 \n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Two flops per multiply-add uint64_t flop = uint64_t(2) * m * n * k; double gflop = double(flop) / double(1.0e9); return gflop / runtime_s; } };
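// Usage sketch: typical driver-side handling of the options above. Assumes the
// including translation unit already pulls in <iostream> and the CUTLASS command-line
// utilities, as the accompanying example does; `measured_kernel_time_in_seconds` is a
// placeholder for a runtime obtained from profiling.
//
//   Options options;
//   options.parse(argc, args);
//   if (options.help) {
//     options.print_usage(std::cout) << std::endl;
//     return 0;
//   }
//   double runtime_s = measured_kernel_time_in_seconds;
//   std::cout << "GFLOP/s: " << options.gflops(runtime_s) << std::endl;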
examples/54_hopper_fp8_warp_specialized_gemm/hopper_fp8_commandline.hpp/0
{ "file_path": "examples/54_hopper_fp8_warp_specialized_gemm/hopper_fp8_commandline.hpp", "repo_id": "examples", "token_count": 1994 }
12
# Examples of using the CUTLASS Python interface * [00_basic_gemm](/examples/python/00_basic_gemm.ipynb) Shows how to declare, configure, compile, and run a CUTLASS GEMM using the Python interface * [01_epilogue](/examples/python/01_epilogue.ipynb) Shows how to fuse elementwise activation functions to GEMMs via the Python interface * [02_pytorch_extension_grouped_gemm](/examples/python/02_pytorch_extension_grouped_gemm.ipynb) Shows how to declare, compile, and run a grouped GEMM operation via the Python interface, along with how the emitted kernel can be easily exported to a PyTorch CUDA extension. * [03_basic_conv2d](/examples/python/03_basic_conv2d.ipynb) Shows how to declare, configure, compile, and run a CUTLASS Conv2d using the Python interface * [04_epilogue_visitor](/examples/python/04_epilogue_visitor.ipynb) Shows how to fuse elementwise activation functions to GEMMs via the Python Epilogue Visitor interface
examples/python/README.md/0
{ "file_path": "examples/python/README.md", "repo_id": "examples", "token_count": 317 }
13
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/arch/copy.hpp> // Config #if defined(__clang__) && defined(__CUDA__) // ldmatrix PTX instructions added in Clang 14: https://reviews.llvm.org/D107046 // ... but will not work until Clang 15: // * https://reviews.llvm.org/D121666 // * https://reviews.llvm.org/D126846 #define CUTE_ARCH_CLANG_SUPPORTS_LDSM_SM75 (__clang_major__ >= 15) #endif #if defined(__NVCC__) || defined(__CUDACC_RTC__) // ldmatrix PTX instruction added in CUDA 10.2+ #define CUTE_ARCH_NVCC_SUPPORTS_LDSM_SM75 ((__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2) || __CUDACC_VER_MAJOR__ >= 11) #endif #if ! defined(CUTE_ARCH_LDSM_SM75_SUPPORTED) #define CUTE_ARCH_LDSM_SM75_SUPPORTED (CUTE_ARCH_NVCC_SUPPORTS_LDSM_SM75 || CUTE_ARCH_CLANG_SUPPORTS_LDSM_SM75) #endif #if ! 
defined(CUTE_ARCH_LDSM_SM75_ENABLED) #define CUTE_ARCH_LDSM_SM75_ENABLED (CUTE_ARCH_LDSM_SM75_SUPPORTED) #endif #if (CUTE_ARCH_LDSM_SM75_ENABLED) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 750 #define CUTE_ARCH_LDSM_SM75_ACTIVATED 1 #endif namespace cute { struct SM75_U32x1_LDSM_N { using SRegisters = uint128_t[1]; using DRegisters = uint32_t[1]; CUTE_HOST_DEVICE static void copy(uint128_t const& smem_src, uint32_t& dst) { #if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED) uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_src); asm volatile ("ldmatrix.sync.aligned.x1.m8n8.shared.b16 {%0}, [%1];\n" : "=r"(dst) : "r"(smem_int_ptr)); #else CUTE_INVALID_CONTROL_PATH("Trying to use ldmatrix without CUTE_ARCH_LDSM_SM75_ACTIVATED."); #endif } }; struct SM75_U32x2_LDSM_N { using SRegisters = uint128_t[1]; using DRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void copy(uint128_t const& smem_src, uint32_t& dst0, uint32_t& dst1) { #if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED) uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_src); asm volatile ("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];\n" : "=r"(dst0), "=r"(dst1) : "r"(smem_int_ptr)); #else CUTE_INVALID_CONTROL_PATH("Trying to use ldmatrix without CUTE_ARCH_LDSM_SM75_ACTIVATED."); #endif } }; struct SM75_U32x4_LDSM_N { using SRegisters = uint128_t[1]; using DRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void copy(uint128_t const& smem_src, uint32_t& dst0, uint32_t& dst1, uint32_t& dst2, uint32_t& dst3) { #if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED) uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_src); asm volatile ("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];\n" : "=r"(dst0), "=r"(dst1), "=r"(dst2), "=r"(dst3) : "r"(smem_int_ptr)); #else CUTE_INVALID_CONTROL_PATH("Trying to use ldmatrix without CUTE_ARCH_LDSM_SM75_ACTIVATED."); #endif } }; struct SM75_U16x2_LDSM_T { using SRegisters = uint128_t[1]; using DRegisters = uint32_t[1]; CUTE_HOST_DEVICE static void copy(uint128_t const& smem_src, uint32_t& dst) { #if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED) uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_src); asm volatile ("ldmatrix.sync.aligned.x1.trans.m8n8.shared.b16 {%0}, [%1];\n" : "=r"(dst) : "r"(smem_int_ptr)); #else CUTE_INVALID_CONTROL_PATH("Trying to use ldmatrix without CUTE_ARCH_LDSM_SM75_ACTIVATED."); #endif } }; struct SM75_U16x4_LDSM_T { using SRegisters = uint128_t[1]; using DRegisters = uint32_t[2]; CUTE_HOST_DEVICE static void copy(uint128_t const& smem_src, uint32_t& dst0, uint32_t& dst1) { #if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED) uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_src); asm volatile ("ldmatrix.sync.aligned.x2.trans.m8n8.shared.b16 {%0, %1}, [%2];\n" : "=r"(dst0), "=r"(dst1) : "r"(smem_int_ptr)); #else CUTE_INVALID_CONTROL_PATH("Trying to use ldmatrix without CUTE_ARCH_LDSM_SM75_ACTIVATED."); #endif } }; struct SM75_U16x8_LDSM_T { using SRegisters = uint128_t[1]; using DRegisters = uint32_t[4]; CUTE_HOST_DEVICE static void copy(uint128_t const& smem_src, uint32_t& dst0, uint32_t& dst1, uint32_t& dst2, uint32_t& dst3) { #if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED) uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_src); asm volatile ("ldmatrix.sync.aligned.x4.trans.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];\n" : "=r"(dst0), "=r"(dst1), "=r"(dst2), "=r"(dst3) : "r"(smem_int_ptr)); #else CUTE_INVALID_CONTROL_PATH("Trying to use ldmatrix without CUTE_ARCH_LDSM_SM75_ACTIVATED."); #endif } }; // // Legacy LDSM interfaces that aren't very useful // template <class T> CUTE_HOST_DEVICE void 
copy_ldsm(uint128_t const* const smem_ptr, T* rmem_ptr) { uint32_t* reg_ptr = reinterpret_cast<uint32_t*>(rmem_ptr); // if constexpr if (sizeof(T) == 4) { SM75_U32x1_LDSM_N::copy(smem_ptr[0], reg_ptr[0]); } else if (sizeof(T) == 8) { SM75_U32x2_LDSM_N::copy(smem_ptr[0], reg_ptr[0], reg_ptr[1]); } else if (sizeof(T) == 16) { SM75_U32x4_LDSM_N::copy(smem_ptr[0], reg_ptr[0], reg_ptr[1], reg_ptr[2], reg_ptr[3]); } else { static_assert(sizeof(T) == 4 || sizeof(T) == 8 || sizeof(T) == 16, "sizeof(T) is not supported"); } } template <class T> CUTE_HOST_DEVICE void copy_ldsm_trans(uint128_t const* const smem_ptr, T* rmem_ptr) { uint32_t* reg_ptr = reinterpret_cast<uint32_t*>(rmem_ptr); // if constexpr if (sizeof(T) == 4) { SM75_U16x2_LDSM_T::copy(smem_ptr[0], reg_ptr[0]); } else if (sizeof(T) == 8) { SM75_U16x4_LDSM_T::copy(smem_ptr[0], reg_ptr[0], reg_ptr[1]); } else if (sizeof(T) == 16) { SM75_U16x8_LDSM_T::copy(smem_ptr[0], reg_ptr[0], reg_ptr[1], reg_ptr[2], reg_ptr[3]); } else { static_assert(sizeof(T) == 4 || sizeof(T) == 8 || sizeof(T) == 16, "sizeof(T) is not supported"); } } } // end namespace cute
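// Usage sketch: issuing a four-matrix ldmatrix load through the legacy helper above
// from device code. All 32 threads of the warp must participate and the source must
// be a 16B-aligned shared-memory address; the wrapper name is illustrative.
//
//   __device__ void load_four_8x8_b16_tiles(cute::uint128_t const* smem_src,
//                                           cute::uint128_t&       rmem_dst) {
//     // sizeof(cute::uint128_t) == 16, so this dispatches to SM75_U32x4_LDSM_N
//     cute::copy_ldsm(smem_src, &rmem_dst);
//   }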
include/cute/arch/copy_sm75.hpp/0
{ "file_path": "include/cute/arch/copy_sm75.hpp", "repo_id": "include", "token_count": 3399 }
14
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/util/type_traits.hpp> #include <cute/numeric/math.hpp> #include <cute/numeric/integral_constant.hpp> namespace cute { /** Compile-time rational arithmetic type. * Like cute::C for std::integral_constant, cute::R for std::ratio has a short name * for error messages and compile times. * The static data members @a num and @a den represent the reduced numerator and denominator * of the rational value. Thus, two cute::R types with different @a n or @a d are distinct types * even if they represent the same rational value. * A cute::R exposes the reduced canonical type via its ::type member. * That is, cute::R<3,6>::type is cute::R<1,2> and cute::R<6,3>::type is cute::C<2>. * A cute::R<n,d>::value can be used much like any other trait::value. It can be involved in * arithmetic expressions (according to the operator-overloads for cute::C and cute::R, * though these may be incomplete) but with a potential rational value rather than an integral value. 
*/ template <auto n, auto d> class R { static_assert(d != 0); static constexpr auto an = abs(n); static constexpr auto ad = abs(d); static constexpr auto g = gcd(an, ad); public: static constexpr auto num = signum(n) * signum(d) * an / g; static constexpr auto den = ad / g; // RI: den >= 1 && gcd(abs(num),den) == 1 using type = typename conditional<num == 0 || den == 1, C<num>, R<num,den>>::type; }; template <class T> struct is_ratio : false_type {}; template <auto n, auto d> struct is_ratio<R<n,d>> : true_type {}; template <auto a, auto b> CUTE_HOST_DEVICE constexpr typename R<a,b>::type ratio(C<a>, C<b>) { return {}; } template <auto a, auto b, auto c> CUTE_HOST_DEVICE constexpr typename R<a*c,b>::type ratio(C<a>, R<b,c>) { return {}; } template <auto a, auto b, auto c> CUTE_HOST_DEVICE constexpr typename R<b,a*c>::type ratio(R<b,c>, C<a>) { return {}; } template <auto a, auto b, auto c, auto d> CUTE_HOST_DEVICE constexpr typename R<a*d,b*c>::type ratio(R<a,b>, R<c,d>) { return {}; } // // Non-reduced ratio implementations // template <auto a, auto b> CUTE_HOST_DEVICE constexpr R<a,b> nratio(C<a>, C<b>) { return {}; } template <auto a, auto b, auto c> CUTE_HOST_DEVICE constexpr R<a*c,b> nratio(C<a>, R<b,c>) { return {}; } template <auto a, auto b, auto c> CUTE_HOST_DEVICE constexpr R<b,a*c> nratio(R<b,c>, C<a>) { return {}; } template <auto a, auto b, auto c, auto d> CUTE_HOST_DEVICE constexpr R<a*d,b*c> nratio(R<a,b>, R<c,d>) { return {}; } // // Operators // template <auto a, auto b, auto x, auto y> CUTE_HOST_DEVICE constexpr typename R<a*x,b*y>::type operator*(R<a,b>, R<x,y>) { return {}; } template <auto a, auto b, auto c> CUTE_HOST_DEVICE constexpr typename R<a*c,b>::type operator*(R<a,b>, C<c>) { return {}; } template <auto c, auto a, auto b> CUTE_HOST_DEVICE constexpr typename R<a*c,b>::type operator*(C<c>, R<a,b>) { return {}; } template <auto c, auto a, auto b> CUTE_HOST_DEVICE constexpr typename R<c*b,a>::type operator/(C<c>, R<a,b>) { return {}; } // Product with dynamic type needs to produce an integer... template <class C, auto a, auto b, __CUTE_REQUIRES(cute::is_std_integral<C>::value)> CUTE_HOST_DEVICE constexpr auto operator*(C const& c, R<a,b>) { return c * R<a,b>::num / R<a,b>::den; } // Product with dynamic type needs to produce an integer... 
template <auto a, auto b, class C, __CUTE_REQUIRES(cute::is_std_integral<C>::value)> CUTE_HOST_DEVICE constexpr auto operator*(R<a,b>, C const& c) { return c * R<a,b>::num / R<a,b>::den; } template <auto a, auto b, auto x, auto y> CUTE_HOST_DEVICE constexpr typename R<a*y+b*x, b*y>::type operator+(R<a,b>, R<x,y>) { return {}; } template <auto a, auto b, auto c> CUTE_HOST_DEVICE constexpr typename R<a+c*b,b>::type operator+(R<a,b>, C<c>) { return {}; } template <auto c, auto a, auto b> CUTE_HOST_DEVICE constexpr typename R<a+c*b,b>::type operator+(C<c>, R<a,b>) { return {}; } template <auto a, auto b, auto x, auto y> CUTE_HOST_DEVICE constexpr bool_constant<R<a,b>::num == R<x,y>::num && R<a,b>::den == R<x,y>::den> operator==(R<a,b>, R<x,y>) { return {}; } template <auto a, auto b, auto c> CUTE_HOST_DEVICE constexpr bool_constant<R<a,b>::num == c && R<a,b>::den == 1> operator==(R<a,b>, C<c>) { return {}; } template <auto c, auto a, auto b> CUTE_HOST_DEVICE constexpr bool_constant<R<a,b>::num == c && R<a,b>::den == 1> operator==(C<c>, R<a,b>) { return {}; } template <auto a, auto b> CUTE_HOST_DEVICE constexpr typename R<abs(a),abs(b)>::type abs(R<a,b>) { return {}; } template <auto a, auto b> CUTE_HOST_DEVICE constexpr int32_t log_2(R<a,b>) { static_assert(R<a,b>::num > 0); static_assert(R<a,b>::den > 0); return log_2(static_cast<uint32_t>(R<a,b>::num)) - log_2(static_cast<uint32_t>(R<a,b>::den)); } // @return A non-reduced ratio cute::R of the Trait0::value / Trait1::value template <class Trait0, class Trait1> CUTE_HOST_DEVICE constexpr auto trait_ratio(Trait0, Trait1) { return nratio(static_value<Trait0>(), static_value<Trait1>()); } // // Display utilities // template <auto a, auto b> CUTE_HOST_DEVICE void print(R<a,b>) { print(C<a>{}); print("/"); print(C<b>{}); } #if !defined(__CUDACC_RTC__) template <auto a, auto b> CUTE_HOST std::ostream& operator<<(std::ostream& os, R<a,b>) { return os << "_" << C<a>{} << "/" << C<b>{}; } #endif } // end namespace cute
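// Illustrative compile-time checks mirroring the class comment above: ratios reduce
// on construction, collapse to cute::C when the denominator becomes 1, and arithmetic
// between ratios and integral constants stays exact.
static_assert(cute::is_same<typename cute::R<3,6>::type, cute::R<1,2>>::value, "R<3,6> reduces to R<1,2>");
static_assert(cute::is_same<typename cute::R<6,3>::type, cute::C<2>>::value, "R<6,3> collapses to C<2>");
static_assert(cute::is_same<decltype(cute::R<1,2>{} * cute::C<4>{}), cute::C<2>>::value, "(1/2) * 4 == 2");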
include/cute/numeric/integral_ratio.hpp/0
{ "file_path": "include/cute/numeric/integral_ratio.hpp", "repo_id": "include", "token_count": 2827 }
15
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/type_traits> #include <cuda/std/utility> #include <cuda/std/cstddef> #include <cuda/std/cstdint> #include <cuda/std/limits> #else #include <type_traits> #include <utility> // tuple_size, tuple_element #include <cstddef> // ptrdiff_t #include <cstdint> // uintptr_t #include <limits> // numeric_limits #endif #include <cute/config.hpp> namespace cute { using CUTE_STL_NAMESPACE::enable_if; using CUTE_STL_NAMESPACE::enable_if_t; } #define __CUTE_REQUIRES(...) typename cute::enable_if<(__VA_ARGS__)>::type* = nullptr #define __CUTE_REQUIRES_V(...) 
typename cute::enable_if<decltype((__VA_ARGS__))::value>::type* = nullptr namespace cute { // <type_traits> using CUTE_STL_NAMESPACE::conjunction; using CUTE_STL_NAMESPACE::conjunction_v; using CUTE_STL_NAMESPACE::disjunction; using CUTE_STL_NAMESPACE::disjunction_v; using CUTE_STL_NAMESPACE::negation; using CUTE_STL_NAMESPACE::negation_v; using CUTE_STL_NAMESPACE::void_t; using CUTE_STL_NAMESPACE::is_void_v; using CUTE_STL_NAMESPACE::is_base_of; using CUTE_STL_NAMESPACE::is_base_of_v; using CUTE_STL_NAMESPACE::is_const; using CUTE_STL_NAMESPACE::is_const_v; using CUTE_STL_NAMESPACE::is_volatile; using CUTE_STL_NAMESPACE::is_volatile_v; // using CUTE_STL_NAMESPACE::true_type; // using CUTE_STL_NAMESPACE::false_type; using CUTE_STL_NAMESPACE::conditional; using CUTE_STL_NAMESPACE::conditional_t; using CUTE_STL_NAMESPACE::remove_const_t; using CUTE_STL_NAMESPACE::remove_cv_t; using CUTE_STL_NAMESPACE::remove_reference_t; using CUTE_STL_NAMESPACE::extent; using CUTE_STL_NAMESPACE::remove_extent; using CUTE_STL_NAMESPACE::decay; using CUTE_STL_NAMESPACE::decay_t; using CUTE_STL_NAMESPACE::is_lvalue_reference; using CUTE_STL_NAMESPACE::is_lvalue_reference_v; using CUTE_STL_NAMESPACE::is_reference; using CUTE_STL_NAMESPACE::is_trivially_copyable; using CUTE_STL_NAMESPACE::is_convertible; using CUTE_STL_NAMESPACE::is_convertible_v; using CUTE_STL_NAMESPACE::is_same; using CUTE_STL_NAMESPACE::is_same_v; using CUTE_STL_NAMESPACE::is_arithmetic; using CUTE_STL_NAMESPACE::is_unsigned; using CUTE_STL_NAMESPACE::is_unsigned_v; using CUTE_STL_NAMESPACE::is_signed; using CUTE_STL_NAMESPACE::is_signed_v; using CUTE_STL_NAMESPACE::make_signed; using CUTE_STL_NAMESPACE::make_signed_t; // using CUTE_STL_NAMESPACE::is_integral; template <class T> using is_std_integral = CUTE_STL_NAMESPACE::is_integral<T>; using CUTE_STL_NAMESPACE::is_empty; using CUTE_STL_NAMESPACE::is_empty_v; using CUTE_STL_NAMESPACE::invoke_result_t; using CUTE_STL_NAMESPACE::common_type; using CUTE_STL_NAMESPACE::common_type_t; using CUTE_STL_NAMESPACE::remove_pointer; using CUTE_STL_NAMESPACE::remove_pointer_t; // <utility> using CUTE_STL_NAMESPACE::declval; template <class T> constexpr T&& forward(remove_reference_t<T>& t) noexcept { return static_cast<T&&>(t); } template <class T> constexpr T&& forward(remove_reference_t<T>&& t) noexcept { static_assert(! is_lvalue_reference_v<T>, "T cannot be an lvalue reference (e.g., U&)."); return static_cast<T&&>(t); } template <class T> constexpr remove_reference_t<T>&& move(T&& t) noexcept { return static_cast<remove_reference_t<T>&&>(t); } // <limits> using CUTE_STL_NAMESPACE::numeric_limits; // <cstddef> using CUTE_STL_NAMESPACE::ptrdiff_t; // <cstdint> using CUTE_STL_NAMESPACE::uintptr_t; // C++20 // using std::remove_cvref; template <class T> struct remove_cvref { using type = remove_cv_t<remove_reference_t<T>>; }; // C++20 // using std::remove_cvref_t; template <class T> using remove_cvref_t = typename remove_cvref<T>::type; // // dependent_false // // @brief An always-false value that depends on one or more template parameters. // See // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1830r1.pdf // https://github.com/cplusplus/papers/issues/572 // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2022/p2593r0.html template <class... Args> inline constexpr bool dependent_false = false; // // tuple_size, tuple_element // // @brief CuTe-local tuple-traits to prevent conflicts with other libraries. // For cute:: types, we specialize std::tuple-traits, which is explicitly allowed. 
// cute::tuple, cute::array, cute::array_subbyte, etc // But CuTe wants to treat some external types as tuples as well. For those, // we specialize cute::tuple-traits to avoid polluting external traits. // dim3, uint3, etc template <class T, class = void> struct tuple_size; template <class T> struct tuple_size<T,void_t<typename CUTE_STL_NAMESPACE::tuple_size<T>::type>> : CUTE_STL_NAMESPACE::integral_constant<size_t, CUTE_STL_NAMESPACE::tuple_size<T>::value> {}; // S = : std::integral_constant<std::size_t, std::tuple_size<T>::value> {}; template <class T> constexpr size_t tuple_size_v = tuple_size<T>::value; template <size_t I, class T, class = void> struct tuple_element; template <size_t I, class T> struct tuple_element<I,T,void_t<typename CUTE_STL_NAMESPACE::tuple_element<I,T>::type>> : CUTE_STL_NAMESPACE::tuple_element<I,T> {}; template <size_t I, class T> using tuple_element_t = typename tuple_element<I,T>::type; // // is_valid // namespace detail { template <class F, class... Args, class = decltype(declval<F&&>()(declval<Args&&>()...))> CUTE_HOST_DEVICE constexpr auto is_valid_impl(int) { return CUTE_STL_NAMESPACE::true_type{}; } template <class F, class... Args> CUTE_HOST_DEVICE constexpr auto is_valid_impl(...) { return CUTE_STL_NAMESPACE::false_type{}; } template <class F> struct is_valid_fn { template <class... Args> CUTE_HOST_DEVICE constexpr auto operator()(Args&&...) const { return is_valid_impl<F, Args&&...>(int{}); } }; } // end namespace detail template <class F> CUTE_HOST_DEVICE constexpr auto is_valid(F&&) { return detail::is_valid_fn<F&&>{}; } template <class F, class... Args> CUTE_HOST_DEVICE constexpr auto is_valid(F&&, Args&&...) { return detail::is_valid_impl<F&&, Args&&...>(int{}); } template <bool B, template<class...> class True, template<class...> class False> struct conditional_template { template <class... U> using type = True<U...>; }; template <template<class...> class True, template<class...> class False> struct conditional_template<false, True, False> { template <class... U> using type = False<U...>; }; } // end namespace cute
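// Illustrative checks: remove_cvref_t strips reference and cv-qualification in one
// step, and is_valid builds a compile-time detector from a generic lambda. The
// detector is shown in comment form to avoid introducing a namespace-scope lambda
// object into this header; the names used are arbitrary.
static_assert(cute::is_same<cute::remove_cvref_t<int const&>, int>::value, "remove_cvref_t strips const&");
static_assert(cute::is_same<cute::remove_cvref_t<float&&>, float>::value, "remove_cvref_t strips rvalue references");
//
//   auto has_subscript = cute::is_valid([](auto&& t) -> decltype(t[0]) {});
//   static_assert( decltype(has_subscript(cute::declval<int*>()))::value, "int* supports operator[]");
//   static_assert(!decltype(has_subscript(cute::declval<int >()))::value, "int does not");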
include/cute/util/type_traits.hpp/0
{ "file_path": "include/cute/util/type_traits.hpp", "repo_id": "include", "token_count": 3088 }
16
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Matrix multiply */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "mma.h" #include "cutlass/layout/matrix.h" #include "cutlass/numeric_types.h" //////////////////////////////////////////////////////////////////////////////// #if ((__CUDACC_VER_MAJOR__ > 11) || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 8)) #define CUTLASS_ARCH_MMA_SM90_F64_MMA_SUPPORTED #if (!defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED)) #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900)) #define CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED #endif #endif #endif #if (__CUDACC_VER_MAJOR__ >= 12) #define CUTLASS_ARCH_MMA_SM90_SUPPORTED #if (!defined(CUTLASS_ARCH_MMA_SM90_ENABLED)) #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900)) #define CUTLASS_ARCH_MMA_SM90_ENABLED #endif #endif #endif #if ((__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 3))) #define CUTLASS_ARCH_MMA_MODIFIABLE_TMA_SM90_SUPPORTED #endif //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace arch { //////////////////////////////////////////////////////////////////////////////// /// Matrix Multiply-Add 16x8x4 fp64 //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: F64 = F64 * F64 + F64 template <> struct Mma< gemm::GemmShape<16,8,4>, 32, double, layout::RowMajor, double, layout::ColumnMajor, double, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<16,8,4>; using ElementA = double; using LayoutA = layout::RowMajor; using FragmentA = Array<double, 2>; using ElementB = double; using LayoutB = layout::ColumnMajor; using FragmentB = Array<double, 1>; using ElementC = double; using LayoutC = layout::RowMajor; using FragmentC = Array<double, 4>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm90; CUTLASS_HOST_DEVICE void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c) const { #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) double const *A = reinterpret_cast<double const *>(&a); double const *B = reinterpret_cast<double const *>(&b); double const *C = reinterpret_cast<double const *>(&c); double *D = reinterpret_cast<double *>(&d); asm volatile("mma.sync.aligned.m16n8k4.row.col.f64.f64.f64.f64.rn {%0, %1, %2, %3}, {%4, %5}, {%6}, {%7, %8, %9, %10};\n" : "=d"(D[0]), "=d"(D[1]), "=d"(D[2]), "=d"(D[3]) : "d"(A[0]), "d"(A[1]), "d"(B[0]), "d"(C[0]), "d"(C[1]), "d"(C[2]), "d"(C[3])); #else CUTLASS_UNUSED(d); CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_NOT_IMPLEMENTED(); #endif } }; //////////////////////////////////////////////////////////////////////////////// /// Matrix Multiply-Add 16x8x8 fp64 //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: F64 = F64 * F64 + F64 template <> struct Mma< gemm::GemmShape<16,8,8>, 32, double, layout::RowMajor, double, layout::ColumnMajor, double, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<16,8,8>; using ElementA = double; using LayoutA = layout::RowMajor; using FragmentA = Array<double, 4>; using ElementB = double; using LayoutB = layout::ColumnMajor; using FragmentB = Array<double, 2>; using ElementC = double; using LayoutC = layout::RowMajor; using FragmentC = Array<double, 4>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm90; CUTLASS_HOST_DEVICE void operator()(FragmentC &d, FragmentA const &a, FragmentB 
const &b, FragmentC const &c) const { #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) double const *A = reinterpret_cast<double const *>(&a); double const *B = reinterpret_cast<double const *>(&b); double const *C = reinterpret_cast<double const *>(&c); double *D = reinterpret_cast<double *>(&d); asm volatile("mma.sync.aligned.m16n8k8.row.col.f64.f64.f64.f64 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%10, %11, %12, %13};\n" : "=d"(D[0]), "=d"(D[1]), "=d"(D[2]), "=d"(D[3]) : "d"(A[0]), "d"(A[1]), "d"(A[2]), "d"(A[3]), "d"(B[0]), "d"(B[1]), "d"(C[0]), "d"(C[1]), "d"(C[2]), "d"(C[3])); #else CUTLASS_UNUSED(d); CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_NOT_IMPLEMENTED(); #endif } }; //////////////////////////////////////////////////////////////////////////////// /// Matrix Multiply-Add 16x8x16 fp64 //////////////////////////////////////////////////////////////////////////////// /// Matrix multiply-add operation: F64 = F64 * F64 + F64 template <> struct Mma< gemm::GemmShape<16,8,16>, 32, double, layout::RowMajor, double, layout::ColumnMajor, double, layout::RowMajor, OpMultiplyAdd> { using Shape = gemm::GemmShape<16,8,16>; using ElementA = double; using LayoutA = layout::RowMajor; using FragmentA = Array<double, 8>; using ElementB = double; using LayoutB = layout::ColumnMajor; using FragmentB = Array<double, 4>; using ElementC = double; using LayoutC = layout::RowMajor; using FragmentC = Array<double, 4>; using Operator = OpMultiplyAdd; using ArchTag = arch::Sm90; CUTLASS_HOST_DEVICE void operator()(FragmentC &d, FragmentA const &a, FragmentB const &b, FragmentC const &c) const { #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED) double const *A = reinterpret_cast<double const *>(&a); double const *B = reinterpret_cast<double const *>(&b); double const *C = reinterpret_cast<double const *>(&c); double *D = reinterpret_cast<double *>(&d); asm volatile("mma.sync.aligned.m16n8k16.row.col.f64.f64.f64.f64 {%0, %1, %2, %3}, {%4, %5, %6, %7, %8, %9, %10, %11}, {%12, %13, %14, %15}, {%16, %17, %18, %19};\n" : "=d"(D[0]), "=d"(D[1]), "=d"(D[2]), "=d"(D[3]) : "d"(A[0]), "d"(A[1]), "d"(A[2]), "d"(A[3]), "d"(A[4]), "d"(A[5]), "d"(A[6]), "d"(A[7]), "d"(B[0]), "d"(B[1]), "d"(B[2]), "d"(B[3]), "d"(C[0]), "d"(C[1]), "d"(C[2]), "d"(C[3])); #else CUTLASS_UNUSED(d); CUTLASS_UNUSED(a); CUTLASS_UNUSED(b); CUTLASS_UNUSED(c); CUTLASS_NOT_IMPLEMENTED(); #endif } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace arch } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/arch/mma_sm90.h/0
{ "file_path": "include/cutlass/arch/mma_sm90.h", "repo_id": "include", "token_count": 3148 }
17
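A minimal usage sketch (not part of mma_sm90.h above; added for illustration) of how one of the warp-wide fp64 MMA atoms defined in that header might be invoked from device code. The kernel-side function name and the zero-filled fragments are placeholder assumptions; in a real kernel each of the 32 participating threads holds its lane's share of the 16x8x4 tile in the layout mandated by the PTX instruction, and the MMA path only compiles when targeting sm90.

#include "cutlass/arch/mma_sm90.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"

// Hypothetical device function: all 32 threads of one warp must call this together.
__device__ void example_fp64_mma_16x8x4() {
  using MmaOp = cutlass::arch::Mma<
      cutlass::gemm::GemmShape<16, 8, 4>, 32,
      double, cutlass::layout::RowMajor,
      double, cutlass::layout::ColumnMajor,
      double, cutlass::layout::RowMajor,
      cutlass::arch::OpMultiplyAdd>;

  MmaOp::FragmentA frag_A;  // 2 doubles per thread
  MmaOp::FragmentB frag_B;  // 1 double per thread
  MmaOp::FragmentC accum;   // 4 doubles per thread
  frag_A.clear();           // placeholder data; normally loaded from shared memory
  frag_B.clear();
  accum.clear();

  // D = A * B + C, accumulating in place (CUTLASS itself passes the accumulator as both d and c).
  MmaOp mma;
  mma(accum, frag_A, frag_B, accum);
}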
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Basic include for CUTLASS BLAS3/HPC code. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/blas3_types.h" #include "cutlass/coord.h" #include "cutlass/complex.h" #include "cutlass/functional.h" #include "cutlass/numeric_types.h" //////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines FillMode inversions template <FillMode kFillMode> struct InvertFillMode; /// Invert FillMode lower to upper template <> struct InvertFillMode<FillMode::kLower> { static FillMode const mode = FillMode::kUpper; }; /// Invert FillMode upper to lower template <> struct InvertFillMode<FillMode::kUpper> { static FillMode const mode = FillMode::kLower; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines SideMode inversions template <SideMode kSideMode> struct InvertSideMode; /// Invert SideMode left to right template <> struct InvertSideMode<SideMode::kLeft> { static SideMode const mode = SideMode::kRight; }; /// Invert SideMode right to left template <> struct InvertSideMode<SideMode::kRight> { static SideMode const mode = SideMode::kLeft; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines correct compare operation for Triangular matrix boundary template <FillMode kFillMode, DiagType kDiagType = DiagType::kNonUnit> struct TrMatrixCompareOp { using Index = int32_t; using Type = typename platform::conditional< (kFillMode == FillMode::kLower), greater_equal<Index>, less_equal<Index>>::type; }; template <FillMode kFillMode> struct TrMatrixCompareOp <kFillMode, DiagType::kUnit> { using Index = int32_t; using Type = typename platform::conditional< (kFillMode == FillMode::kLower), greater_equal<Index>, less_equal<Index>>::type; }; template <FillMode kFillMode> struct TrMatrixCompareOp <kFillMode, DiagType::kZero> { using Index = int32_t; using Type = typename platform::conditional< (kFillMode == FillMode::kLower), greater<Index>, less<Index>>::type; }; //////////////////////////////////////////////////////////////////////////////////////////////////// // Returns precision in terms of bits (based on datatype) to fill tensors with. // Defaults to 5 bits of mantissa for TF32 and FP32 (with implicit round-offs). // Also defines acceptable mantissa result variance/error. template <typename Element> struct MantissaInBits { static int constexpr bits = 5; static double constexpr error = 1.0e-7; }; // Full precision is supported for FP64 template <> struct MantissaInBits<double> { static int constexpr bits = 30; static double constexpr error = 1.0e-15; }; template <> struct MantissaInBits<cutlass::complex<double>> { static int constexpr bits = 30; static double constexpr error = 1.0e-15; }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/blas3.h/0
{ "file_path": "include/cutlass/blas3.h", "repo_id": "include", "token_count": 1540 }
18
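A short host-side sketch (assumed, not taken from blas3.h above) of how its compile-time helpers might be used: InvertFillMode and InvertSideMode are pure metafunctions, while TrMatrixCompareOp yields a functor that tests whether a (row, column) coordinate lies inside the stored triangle. The helper function name below is invented for this example.

#include "cutlass/blas3.h"

// True when (row, column) falls in the stored part of a lower-triangular,
// non-unit-diagonal matrix, i.e. when row >= column.
inline bool in_lower_triangle(int32_t row, int32_t column) {
  using Compare = cutlass::TrMatrixCompareOp<cutlass::FillMode::kLower,
                                             cutlass::DiagType::kNonUnit>::Type;  // greater_equal<int32_t>
  return Compare{}(row, column);
}

// The inversion metafunctions are usable directly in static contexts.
static_assert(cutlass::InvertFillMode<cutlass::FillMode::kLower>::mode == cutlass::FillMode::kUpper,
              "kLower inverts to kUpper");
static_assert(cutlass::InvertSideMode<cutlass::SideMode::kLeft>::mode == cutlass::SideMode::kRight,
              "kLeft inverts to kRight");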
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once // common #include "cutlass/arch/mma.h" #include "cutlass/cutlass.h" #include "cutlass/arch/mma.h" #include "cutlass/trace.h" #include "cutlass/cluster_launch.hpp" #include "cutlass/device_kernel.h" #include "cutlass/conv/kernel/conv_universal.hpp" #include "cutlass/gemm/gemm.h" #include "cutlass/detail/layout.hpp" #include "cutlass/cuda_host_adapter.hpp" //////////////////////////////////////////////////////////////////////////////// namespace cutlass::conv::device { //////////////////////////////////////////////////////////////////////////////// /*! ConvUniversalAdapter is a stateful, reusable handle built around a kernel of type cutlass::conv::kernel::ConvUniversal. It manages the lifetime of the underlying `kernel::Params` struct, and exposes APIs to create it from the host facing arguments. For power users, static methods are exposed that bypass the stateful methods or args->params lowering. 
*/ template <class ConvKernel_> class ConvUniversalAdapter { public: using ConvKernel = ConvKernel_; using TileShape = typename ConvKernel::TileShape; using ElementA = typename ConvKernel::ElementA; using ElementB = typename ConvKernel::ElementB; using ElementC = typename ConvKernel::ElementC; using ElementD = typename ConvKernel::ElementD; using ElementAccumulator = typename ConvKernel::TiledMma::ValTypeC; using DispatchPolicy = typename ConvKernel::DispatchPolicy; using CollectiveMainloop = typename ConvKernel::CollectiveMainloop; using CollectiveEpilogue = typename ConvKernel::CollectiveEpilogue; static bool const kEnableCudaHostAdapter = CUTLASS_ENABLE_CUDA_HOST_ADAPTER; // Tease out meta-information about the conv algorithm static constexpr conv::Operator kConvolutionalOperator = DispatchPolicy::ConvOp; static constexpr int NumSpatialDimensions = ConvKernel::NumSpatialDimensions; // If our TiledMMA's instruction thread layout size is larger than 1, we know its a tensorop! using OperatorClass = cute::conditional_t< (cute::size(typename ConvKernel::TiledMma::AtomThrID{}) > 1), cutlass::arch::OpClassTensorOp, cutlass::arch::OpClassSimt>; using ArchTag = typename ConvKernel::ArchTag; // Assume TiledMma's ShapeMNK is the same as 2.x's ThreadblockShape using ThreadblockShape = cutlass::gemm::GemmShape< cute::size<0>(TileShape{}), cute::size<1>(TileShape{}), cute::size<2>(TileShape{})>; using ClusterShape = cutlass::gemm::GemmShape< cute::size<0>(typename ConvKernel::DispatchPolicy::ClusterShape{}), cute::size<1>(typename ConvKernel::DispatchPolicy::ClusterShape{}), cute::size<2>(typename ConvKernel::DispatchPolicy::ClusterShape{})>; // Instruction shape is easy too, since we get that directly from our TiledMma's atom shape using InstructionShape = cutlass::gemm::GemmShape< cute::size<0>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{}), cute::size<1>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{}), cute::size<2>(typename CollectiveMainloop::TiledMma::AtomShape_MNK{})>; // Legacy: provide a correct warp count, but no reliable warp shape static int const kThreadCount = ConvKernel::MaxThreadsPerBlock; // Warp shape is not a primary API type in 3.x // But we can best approximate it by inspecting the TiledMma // For this, we make the assumption that we always have 4 warps along M, and rest along N, none along K // We also always round up the warp count to 4 if the tiled mma is smaller than 128 threads static constexpr int WarpsInMma = cute::max(4, CUTE_STATIC_V(cute::size(typename ConvKernel::TiledMma{})) / 32); static constexpr int WarpsInMmaM = 4; static constexpr int WarpsInMmaN = cute::ceil_div(WarpsInMma, WarpsInMmaM); using WarpCount = cutlass::gemm::GemmShape<WarpsInMmaM, WarpsInMmaN, 1>; using WarpShape = cutlass::gemm::GemmShape< CUTE_STATIC_V(cute::tile_size<0>(typename CollectiveMainloop::TiledMma{})) / WarpsInMmaM, CUTE_STATIC_V(cute::tile_size<1>(typename CollectiveMainloop::TiledMma{})) / WarpsInMmaN, CUTE_STATIC_V(cute::tile_size<2>(typename CollectiveMainloop::TiledMma{}))>; static int constexpr kStages = CollectiveMainloop::DispatchPolicy::Stages; // Inspect TiledCopy for A and B to compute the alignment size static int constexpr kAlignmentA = detail::get_alignment_count_from_gmem_tiled_copy< typename CollectiveMainloop::GmemTiledCopyA, ElementA>(); static int constexpr kAlignmentB = detail::get_alignment_count_from_gmem_tiled_copy< typename CollectiveMainloop::GmemTiledCopyB, ElementB>(); static int constexpr kAlignmentC = 
detail::get_alignment_count_from_gmem_tiled_copy< typename CollectiveEpilogue::GmemTiledCopyC, ElementC>(); static int constexpr kAlignmentD = detail::get_alignment_count_from_gmem_tiled_copy< typename CollectiveEpilogue::GmemTiledCopyD, ElementD>(); using EpilogueOutputOp = typename CollectiveEpilogue::ThreadEpilogueOp; /// Argument structure: User API using Arguments = typename ConvKernel::Arguments; /// Argument structure: Kernel API using Params = typename ConvKernel::Params; private: /// Kernel API parameters object Params params_; public: /// Access the Params structure Params const& params() const { return params_; } /// Determines whether the conv can execute the given problem. static Status can_implement(Arguments const& args) { if (ConvKernel::can_implement(args)) { return Status::kSuccess; } else { return Status::kInvalid; } } /// Gets the workspace size static size_t get_workspace_size(Arguments const& args) { size_t workspace_bytes = 0; CUTLASS_TRACE_HOST(" workspace_bytes: " << workspace_bytes); workspace_bytes += ConvKernel::get_workspace_size(args); return workspace_bytes; } /// Computes the grid shape static dim3 get_grid_shape(Arguments const& args, void* workspace = nullptr) { auto tmp_params = ConvKernel::to_underlying_arguments(args, workspace); return ConvKernel::get_grid_shape(tmp_params); } /// Computes the grid shape static dim3 get_grid_shape(Params const& params) { return ConvKernel::get_grid_shape(params); } /// Computes the maximum number of active blocks per multiprocessor static int maximum_active_blocks(int /* smem_capacity */ = -1) { CUTLASS_TRACE_HOST("ConvUniversal::maximum_active_blocks()"); int max_active_blocks = -1; int smem_size = ConvKernel::SharedStorageSize; // first, account for dynamic smem capacity if needed cudaError_t result; if (smem_size >= (48 << 10)) { CUTLASS_TRACE_HOST(" Setting smem size to " << smem_size); result = cudaFuncSetAttribute( device_kernel<ConvKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (cudaSuccess != result) { result = cudaGetLastError(); // to clear the error bit CUTLASS_TRACE_HOST( " cudaFuncSetAttribute() returned error: " << cudaGetErrorString(result)); return -1; } } // query occupancy after setting smem size result = cudaOccupancyMaxActiveBlocksPerMultiprocessor( &max_active_blocks, device_kernel<ConvKernel>, ConvKernel::MaxThreadsPerBlock, smem_size); if (cudaSuccess != result) { result = cudaGetLastError(); // to clear the error bit CUTLASS_TRACE_HOST( " cudaOccupancyMaxActiveBlocksPerMultiprocessor() returned error: " << cudaGetErrorString(result)); return -1; } CUTLASS_TRACE_HOST(" max_active_blocks: " << max_active_blocks); return max_active_blocks; } /// Initializes conv state from arguments. Status initialize( Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) { CUTLASS_TRACE_HOST("ConvUniversal::initialize() - workspace " << workspace << ", stream: " << (stream ? 
"non-null" : "null")); size_t workspace_bytes = ConvKernel::get_workspace_size(args); CUTLASS_TRACE_HOST(" workspace_bytes: " << workspace_bytes); if (workspace_bytes) { if (!workspace) { CUTLASS_TRACE_HOST(" error: device workspace must not be null"); return Status::kErrorWorkspaceNull; } CUTLASS_TRACE_HOST(" clearing device workspace"); cudaError_t result = cudaMemsetAsync(workspace, 0, workspace_bytes, stream); if (cudaSuccess != result) { result = cudaGetLastError(); // to clear the error bit CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result)); return Status::kErrorInternal; } } // Initialize the Params structure params_ = ConvKernel::to_underlying_arguments(args, workspace); // Don't set the function attributes - require the CudaHostAdapter to set it. if constexpr (kEnableCudaHostAdapter) { CUTLASS_ASSERT(cuda_adapter); return Status::kSuccess; } else { // account for dynamic smem capacity if needed int smem_size = ConvKernel::SharedStorageSize; if (smem_size >= (48 << 10)) { CUTLASS_TRACE_HOST(" Setting smem size to " << smem_size); cudaError_t result = cudaFuncSetAttribute( device_kernel<ConvKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (cudaSuccess != result) { result = cudaGetLastError(); // to clear the error bit CUTLASS_TRACE_HOST(" cudaFuncSetAttribute() returned error: " << cudaGetErrorString(result)); return Status::kErrorInternal; } } } return Status::kSuccess; } /// Update API is preserved in 3.0, but does not guarantee a lightweight update of params. Status update(Arguments const& args, void* workspace = nullptr) { CUTLASS_TRACE_HOST("ConvUniversal()::update() - workspace: " << workspace); size_t workspace_bytes = get_workspace_size(args); if (workspace_bytes > 0 && nullptr == workspace) { return Status::kErrorWorkspaceNull; } params_ = ConvKernel::to_underlying_arguments(args, workspace); return Status::kSuccess; } /// Primary run() entry point API that is static allowing users to create and manage their own params. 
/// Supplied params struct must be constructed by calling ConvKernel::to_underlying_arguments() static Status run(Params& params, cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) { CUTLASS_TRACE_HOST("ConvUniversal::run()"); dim3 const block = ConvKernel::get_block_shape(); dim3 const grid = get_grid_shape(params); // configure smem size and carveout int smem_size = ConvKernel::SharedStorageSize; Status launch_result; // Use extended launch API only for mainloops that use it if constexpr(ConvKernel::ArchTag::kMinComputeCapability >= 90) { dim3 cluster(cute::size<0>(typename ConvKernel::DispatchPolicy::ClusterShape{}), cute::size<1>(typename ConvKernel::DispatchPolicy::ClusterShape{}), cute::size<2>(typename ConvKernel::DispatchPolicy::ClusterShape{})); void* kernel_params[] = {&params}; if constexpr (kEnableCudaHostAdapter) { // // Use the cuda host adapter // CUTLASS_ASSERT(cuda_adapter); if (cuda_adapter) { launch_result = cuda_adapter->launch( grid, cluster, block, smem_size, stream, kernel_params, 0 ); } else { return Status::kErrorInternal; } } else { CUTLASS_ASSERT(cuda_adapter == nullptr); void const* kernel = (void const*) device_kernel<ConvKernel>; if constexpr (ConvKernel::ArchTag::kMinComputeCapability == 90) { launch_result = ClusterLauncher::launch( grid, cluster, block, smem_size, stream, kernel, kernel_params); } } } else { launch_result = Status::kSuccess; if constexpr (kEnableCudaHostAdapter) { CUTLASS_ASSERT(cuda_adapter); if (cuda_adapter) { void* kernel_params[] = {&params}; launch_result = cuda_adapter->launch( grid, block, smem_size, stream, kernel_params, 0 ); } else { return Status::kErrorInternal; } } else { CUTLASS_ASSERT(cuda_adapter == nullptr); device_kernel<ConvKernel><<<grid, block, smem_size, stream>>>(params); } } cudaError_t result = cudaGetLastError(); if (cudaSuccess == result && Status::kSuccess == launch_result) { return Status::kSuccess; } else { CUTLASS_TRACE_HOST(" Kernel launch failed. Reason: " << result); return Status::kErrorInternal; } } // // Non-static launch overloads that first create and set the internal params struct of this kernel handle. // /// Launches the kernel after first constructing Params internal state from supplied arguments. Status run( Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr ) { Status status = initialize(args, workspace, stream, cuda_adapter); if (Status::kSuccess == status) { status = run(params_, stream, cuda_adapter); } return status; } /// Launches the kernel after first constructing Params internal state from supplied arguments. Status operator()( Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr, CudaHostAdapter *cuda_adapter = nullptr) { return run(args, workspace, stream, cuda_adapter); } /// Overload that allows a user to re-launch the same kernel without updating internal params struct. Status run(cudaStream_t stream = nullptr) { return run(params_, stream); } /// Overload that allows a user to re-launch the same kernel without updating internal params struct. Status operator()(cudaStream_t stream = nullptr) { return run(params_, stream); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::conv::device ////////////////////////////////////////////////////////////////////////////////
include/cutlass/conv/device/conv_universal_adapter.hpp/0
{ "file_path": "include/cutlass/conv/device/conv_universal_adapter.hpp", "repo_id": "include", "token_count": 5681 }
19
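A hedged host-side sketch of the lifecycle the adapter above is designed for: query can_implement, allocate the workspace it reports, initialize the params, then launch (and optionally re-launch) the kernel. "MyConvKernel" stands in for a concrete cutlass::conv::kernel::ConvUniversal instantiation, the wrapper function name is invented for this example, and error handling is reduced to the minimum.

#include <cuda_runtime.h>
#include "cutlass/conv/device/conv_universal_adapter.hpp"

template <class MyConvKernel>
cutlass::Status run_conv_once(typename MyConvKernel::Arguments const &args,
                              cudaStream_t stream = nullptr) {
  using ConvOp = cutlass::conv::device::ConvUniversalAdapter<MyConvKernel>;

  if (ConvOp::can_implement(args) != cutlass::Status::kSuccess) {
    return cutlass::Status::kInvalid;
  }

  // Allocate whatever scratch the kernel reports it needs (may be zero bytes).
  size_t workspace_bytes = ConvOp::get_workspace_size(args);
  void *workspace = nullptr;
  if (workspace_bytes > 0 && cudaMalloc(&workspace, workspace_bytes) != cudaSuccess) {
    return cutlass::Status::kErrorInternal;
  }

  ConvOp conv_op;
  cutlass::Status status = conv_op.initialize(args, workspace, stream);
  if (status == cutlass::Status::kSuccess) {
    status = conv_op.run(stream);  // can be called again without re-initializing
  }

  if (workspace != nullptr) {
    cudaFree(workspace);
  }
  return status;
}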
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/kernel_hardware_info.hpp" #include "cute/tensor.hpp" #include "cute/arch/cluster_sm90.hpp" #include "cutlass/conv/convolution.h" #include "cutlass/conv/dispatch_policy.hpp" #include "cutlass/pipeline/sm90_pipeline.hpp" #include "cutlass/gemm/kernel/tile_scheduler.hpp" /////////////////////////////////////////////////////////////////////////////// namespace cutlass::conv::kernel { /////////////////////////////////////////////////////////////////////////////// template < class CollectiveMainloop_, class CollectiveEpilogue_, class TileSchedulerTag > class ConvUniversal< CollectiveMainloop_, CollectiveEpilogue_, TileSchedulerTag, cute::enable_if_t<cute::is_base_of_v<cutlass::conv::KernelImplicitTmaWarpSpecializedSm90, typename CollectiveMainloop_::DispatchPolicy::Schedule>>> { public: // // Type Aliases // // Mainloop derived types using CollectiveMainloop = CollectiveMainloop_; using TileShape = typename CollectiveMainloop::TileShape; using TiledMma = typename CollectiveMainloop::TiledMma; using ArchTag = typename CollectiveMainloop::ArchTag; using ElementA = typename CollectiveMainloop::ElementA; using StrideA = typename CollectiveMainloop::StrideA; using ElementB = typename CollectiveMainloop::ElementB; using StrideB = typename CollectiveMainloop::StrideB; using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; using ClusterShape = typename DispatchPolicy::ClusterShape; using MainloopArguments = typename CollectiveMainloop::Arguments; using MainloopParams = typename CollectiveMainloop::Params; static constexpr int NumSpatialDimensions = 
CollectiveMainloop::NumSpatialDimensions; static_assert(ArchTag::kMinComputeCapability >= 90); // Epilogue derived types using CollectiveEpilogue = CollectiveEpilogue_; using ElementC = typename CollectiveEpilogue::ElementC; using StrideC = typename CollectiveEpilogue::StrideC; using ElementD = typename CollectiveEpilogue::ElementD; using StrideD = typename CollectiveEpilogue::StrideD; using EpilogueArguments = typename CollectiveEpilogue::Arguments; using EpilogueParams = typename CollectiveEpilogue::Params; static_assert(cute::is_void_v<TileSchedulerTag>, "TMA warp-specialized kernel does not support specializing the tile scheduler."); using TileScheduler = typename cutlass::gemm::kernel::detail::TileSchedulerSelector< TileSchedulerTag, ArchTag, TileShape, ClusterShape>::Scheduler; using TileSchedulerArguments = typename TileScheduler::Arguments; // Kernel level shared memory storage struct SharedStorage { union TensorStorage { using MainloopTensorStorage = typename CollectiveMainloop::TensorStorage; using EpilogueTensorStorage = typename CollectiveEpilogue::TensorStorage; MainloopTensorStorage mainloop; EpilogueTensorStorage epilogue; } tensors; struct PipelineStorage : cute::aligned_struct<16> { using MainloopPipelineStorage = typename CollectiveMainloop::PipelineStorage; using EpiLoadPipelineStorage = typename CollectiveEpilogue::PipelineStorage; alignas(16) MainloopPipelineStorage mainloop; alignas(16) EpiLoadPipelineStorage epi_load; } pipelines; }; static constexpr int SharedStorageSize = sizeof(SharedStorage); static constexpr uint32_t NumLoadWarpGroups = 1; static constexpr uint32_t NumMmaWarpGroups = 1; static constexpr uint32_t MaxThreadsPerBlock = CUTE_STATIC_V(size(TiledMma{})) + (NumLoadWarpGroups * NumThreadsPerWarpGroup); static constexpr uint32_t MinBlocksPerMultiprocessor = 1; // Host facing arguments struct Arguments { MainloopArguments mainloop{}; EpilogueArguments epilogue{}; KernelHardwareInfo hw_info{}; TileSchedulerArguments scheduler{}; }; // Kernel device entry point API struct Params { MainloopParams mainloop; EpilogueParams epilogue; }; // // Methods // // Map user facing arguments to device facing params static Params to_underlying_arguments(Arguments const& args, void* workspace) { (void) workspace; auto mainloop_params = CollectiveMainloop::to_underlying_arguments(args.mainloop, workspace); auto problem_shape_MNKL = append<4>(mainloop_params.problem_shape, Int<1>{}); return { mainloop_params, CollectiveEpilogue::to_underlying_arguments(problem_shape_MNKL, args.epilogue, workspace) }; } // Given arguments, returns true if the kernel can successfully compute upon them. False otherwise. static bool can_implement(Arguments const& args) { bool implementable = true; implementable &= CollectiveMainloop::can_implement(args.mainloop.problem_shape, args.mainloop); implementable &= CollectiveEpilogue::can_implement(args.mainloop.problem_shape.get_transformed_problem_shape_MNK(), args.epilogue); return implementable; } static size_t get_workspace_size(Arguments const& args) { return 0; } // Computes the kernel launch grid shape based on runtime parameters static dim3 get_grid_shape(Params const& params) { // The CONV mainloop params problem shape will be the cute::Shape<> rank-3 MNK tuple we want for grid planning // Although conv problems do not have an L mode, we add it here to comply with the scheduler API auto linear_problem_shape_MNKL = make_shape( size<0>(params.mainloop.problem_shape), // M mode is linearized.
shape<1>(params.mainloop.problem_shape), shape<2>(params.mainloop.problem_shape), Int<1>{}); return cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::get_tiled_cta_shape_mnl( linear_problem_shape_MNKL, TileShape{}, ClusterShape{}); } static dim3 get_block_shape() { return dim3(MaxThreadsPerBlock, 1, 1); } CUTLASS_DEVICE void operator()(Params const& params, char* smem_buf) { using namespace cute; using X = Underscore; // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) if constexpr(size<0>(typename TiledMma::AtomShape_MNK{}) == 64) { printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n"); return; } #endif enum class WarpGroupRole { Producer = 0, Consumer = 1, }; // Kernel level shared memory storage SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(smem_buf); int thread_idx = int(threadIdx.x); int warp_idx = canonical_warp_idx_sync(); int warp_group_thread_idx = thread_idx % NumThreadsPerWarpGroup; auto warp_group_role = WarpGroupRole(canonical_warp_group_idx()); int lane_predicate = cute::elect_one_sync(); // Issue Tma Descriptor Prefetch from a single thread if ((warp_idx == 0) && lane_predicate) { CollectiveMainloop::prefetch_tma_descriptors(params.mainloop); CollectiveEpilogue::prefetch_tma_descriptors(params.epilogue); } // Mainloop Load pipeline using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline; typename MainloopPipeline::Params mainloop_pipeline_params; if (warp_group_role == WarpGroupRole::Producer) { mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Producer; } if (warp_group_role == WarpGroupRole::Consumer) { mainloop_pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer; } mainloop_pipeline_params.is_leader = warp_group_thread_idx == 0; mainloop_pipeline_params.num_consumers = NumThreadsPerWarpGroup; mainloop_pipeline_params.transaction_bytes = CollectiveMainloop::TmaTransactionBytes; MainloopPipeline mainloop_pipeline(shared_storage.pipelines.mainloop, mainloop_pipeline_params, ClusterShape{}); // Epilogue Load pipeline using EpiLoadPipeline = typename CollectiveEpilogue::LoadPipeline; typename EpiLoadPipeline::Params epi_load_pipeline_params; if (warp_group_role == WarpGroupRole::Producer) { epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Producer; } if (warp_group_role == WarpGroupRole::Consumer) { epi_load_pipeline_params.role = EpiLoadPipeline::ThreadCategory::Consumer; } epi_load_pipeline_params.dst_blockid = cute::block_rank_in_cluster(); epi_load_pipeline_params.producer_arv_count = 1; // 1 thread issues TMA load epi_load_pipeline_params.consumer_arv_count = NumThreadsPerWarpGroup; epi_load_pipeline_params.transaction_bytes = CollectiveEpilogue::TmaTransactionBytes; EpiLoadPipeline epi_load_pipeline(shared_storage.pipelines.epi_load, epi_load_pipeline_params); // Epilogue Store pipeline using EpiStorePipeline = typename CollectiveEpilogue::StorePipeline; typename EpiStorePipeline::Params epi_store_pipeline_params; epi_store_pipeline_params.always_wait = true; EpiStorePipeline epi_store_pipeline(epi_store_pipeline_params); // Initialize starting pipeline states for the collectives // Epilogue store pipe is producer-only (consumer is TMA unit, waits via scoreboarding) typename CollectiveMainloop::PipelineState mainloop_pipe_consumer_state; typename CollectiveEpilogue::LoadPipelineState epi_load_pipe_consumer_state; // For the DMA Load (producer) we start with an 
opposite phase // i.e., we skip all waits since we know that the buffer is indeed empty PipelineState mainloop_pipe_producer_state = cutlass::make_producer_start_state<MainloopPipeline>(); PipelineState epi_load_pipe_producer_state = cutlass::make_producer_start_state<EpiLoadPipeline>(); PipelineState epi_store_pipe_producer_state = cutlass::make_producer_start_state<EpiStorePipeline>(); // Separate out problem shape for convenience auto M = get<0>(params.mainloop.problem_shape); auto N = get<1>(params.mainloop.problem_shape); auto K = get<2>(params.mainloop.problem_shape); // output strides are coalesced so we linearize the output shape to match the shape/stride profiles auto linear_problem_shape_MNKL = make_shape(size(M), N, K, Int<1>{}); // TMA requires special handling of strides to deal with coord codomain mapping // Represent the full tensors -- get these from TMA Tensor mA_mk = params.mainloop.tma_load_a.get_tma_tensor(make_shape(M, size(K))); Tensor mB_nk = params.mainloop.tma_load_b.get_tma_tensor(make_shape(N, K)); // Get the appropriate blocks for this thread block -- potential for thread block locality auto cta_tile_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) TiledMma tiled_mma; // Make tiled views, defer the slice Tensor gA_mk = local_tile(mA_mk, cta_tile_shape, make_coord(_,_,_), Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k) Tensor gB_nk = local_tile(mB_nk, cta_tile_shape, make_coord(_,_,_), Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k) // Compute m_coord, n_coord, and l_coord with their post-tiled shapes auto m_coord = idx2crd(int(blockIdx.x), shape<2>(gA_mk)); auto n_coord = idx2crd(int(blockIdx.y), shape<2>(gB_nk)); // The output shape M is linearized so the output coord M here should also be linearized. auto output_tile_coord = make_coord(int(blockIdx.x), n_coord, _, Int<0>{}); // Slice with m_coord and n_coord Tensor gA = gA_mk(_,_,m_coord,_); // (BLK_M,BLK_K,k) Tensor gB = gB_nk(_,_,n_coord,_); // (BLK_N,BLK_K,k) // Get pipeline iterators and increments from tensor shapes auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA)); auto k_tile_count = size<2>(gA); auto c_tile_count = CollectiveEpilogue::get_load_pipe_increment(cta_tile_shape); auto d_tile_count = CollectiveEpilogue::get_store_pipe_increment(cta_tile_shape); // Make sure pipeline init is visible to all producers and consumer CTAs in cluster if constexpr (size(ClusterShape{}) > 1) { cute::cluster_arrive_relaxed(); cute::cluster_wait(); } else { __syncthreads(); } // In a warp specialized kernel, collectives expose data movement and compute operations separately CollectiveMainloop collective_mainloop; CollectiveEpilogue collective_epilogue{params.epilogue, shared_storage.tensors.epilogue}; if (warp_group_role == WarpGroupRole::Producer) { collective_mainloop.load( mainloop_pipeline, mainloop_pipe_producer_state, gA, params.mainloop.tma_load_a, gB, params.mainloop.tma_load_b, k_tile_iter, k_tile_count, thread_idx, shared_storage.tensors.mainloop ); // Update starting mainloop pipeline state for the pipeline drain mainloop_pipe_producer_state.advance(k_tile_count); // Make sure mainloop consumer has been waited upon before issuing epilogue load collective_mainloop.load_tail(mainloop_pipeline, mainloop_pipe_producer_state); if (collective_epilogue.is_producer_load_needed()) { collective_epilogue.load( epi_load_pipeline, epi_load_pipe_producer_state, linear_problem_shape_MNKL, cta_tile_shape, output_tile_coord, tiled_mma, warp_group_thread_idx, shared_storage.tensors.epilogue ); // Update starting load pipeline state for 
the pipeline drain epi_load_pipe_producer_state.advance(c_tile_count); collective_epilogue.load_tail(epi_load_pipeline, epi_load_pipe_producer_state); } } else if (warp_group_role == WarpGroupRole::Consumer) { Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(cta_tile_shape)); // (MMA,MMA_M,MMA_N) collective_mainloop.mma( mainloop_pipeline, mainloop_pipe_consumer_state, accumulators, k_tile_count, thread_idx, shared_storage.tensors.mainloop, params.mainloop ); // Make sure the math instructions are done and free buffers before entering the epilogue collective_mainloop.mma_tail( mainloop_pipeline, mainloop_pipe_consumer_state, k_tile_count ); // Epilogue and write to gD collective_epilogue.store( epi_load_pipeline, epi_load_pipe_consumer_state, epi_store_pipeline, epi_store_pipe_producer_state, linear_problem_shape_MNKL, cta_tile_shape, output_tile_coord, accumulators, tiled_mma, warp_group_thread_idx, shared_storage.tensors.epilogue ); } } }; /////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::conv::kernel
include/cutlass/conv/kernel/sm90_implicit_gemm_tma_warpspecialized.hpp/0
{ "file_path": "include/cutlass/conv/kernel/sm90_implicit_gemm_tma_warpspecialized.hpp", "repo_id": "include", "token_count": 6150 }
20
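For power users, the static kernel API above can also be driven directly, which is essentially what ConvUniversalAdapter does internally. The sketch below is an illustration under stated assumptions, not library code: "Kernel" is a placeholder for a concrete ConvUniversal specialization, the function name is invented, a zero-byte workspace is assumed, and the cudaFuncSetAttribute call required when SharedStorageSize exceeds 48 KB is omitted. It lowers Arguments to Params, derives the launch shape, and uses ClusterLauncher because this kernel requires SM90 cluster semantics.

#include "cutlass/cluster_launch.hpp"
#include "cutlass/device_kernel.h"
#include "cute/tensor.hpp"

template <class Kernel>
cutlass::Status launch_conv_kernel_directly(typename Kernel::Arguments const &args,
                                            cudaStream_t stream = nullptr) {
  // Lower host-facing arguments to the kernel's device-facing params.
  typename Kernel::Params params = Kernel::to_underlying_arguments(args, /* workspace = */ nullptr);

  dim3 const grid  = Kernel::get_grid_shape(params);
  dim3 const block = Kernel::get_block_shape();
  dim3 const cluster(cute::size<0>(typename Kernel::ClusterShape{}),
                     cute::size<1>(typename Kernel::ClusterShape{}),
                     cute::size<2>(typename Kernel::ClusterShape{}));

  int smem_bytes = Kernel::SharedStorageSize;
  void const *kernel_ptr = (void const *) cutlass::device_kernel<Kernel>;
  void *kernel_params[] = {&params};

  return cutlass::ClusterLauncher::launch(
      grid, cluster, block, smem_bytes, stream, kernel_ptr, kernel_params);
}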
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM B (activation tile) matrix from memory. This iterator assumes TensorNHWC layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/threadblock/conv2d_params.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename Element_, typename ThreadMap_, typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess> > class Conv2dWgradActivationTileAccessIteratorAnalytic { public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = layout::TensorNHWC; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using TensorRef = cutlass::TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic; static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided; static int const kConvDim = 2; using ConvProblemSize = typename conv::Conv2dProblemSize; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); static_assert(sizeof_bits<Element>::value >= 8, "WGRAD requires elements of size 8b or greater."); // // Parameters structure // using Params = Conv2dAnalyticParams<Layout>; private: Params const &params_; Conv2dProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; LongIndex iteration_vector_; char const *pointer_; // Filter position (r,s,c) in contiguous dimension stays constant for each gemm_iteration_k int filter_r_[ThreadMap::Iterations::kContiguous]; int filter_s_[ThreadMap::Iterations::kContiguous]; int filter_c_[ThreadMap::Iterations::kContiguous]; int offset_npq_[ThreadMap::Iterations::kStrided]; public: CUTLASS_HOST_DEVICE Conv2dWgradActivationTileAccessIteratorAnalytic( Params const &params, Conv2dProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() ): params_(params), problem_size_(problem_size), pointer_(reinterpret_cast<char const *>(ptr)) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); // initialize r,s,c filter position for every contiguous iteration CUTLASS_PRAGMA_UNROLL for(int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int rsc_offset = threadblock_offset.column() + thread_coord.contiguous() + c * ThreadMap::Delta::kContiguous; filter_r_[c] = rsc_offset / (problem_size_.S * problem_size_.C); int residual = rsc_offset % (problem_size_.S * problem_size_.C); filter_s_[c] = residual / problem_size_.C; filter_c_[c] = residual % problem_size_.C; } // initialize n, p, q offset for every strided iteration CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { offset_npq_[s] = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided; } } ///
Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_vector_ = index % kAccessesPerVector; int residual_access = index / kAccessesPerVector; iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_HOST_DEVICE void advance() { // moves to the next GEMM-K offset (offset_npq_) in GEMM-B by a CTA-K tile CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { offset_npq_[s] += Shape::kRow * problem_size_.split_k_slices; } } /// Returns the coordinate in the activation tensor x that is currently pointed to /// by the iterator. CUTLASS_HOST_DEVICE TensorCoord at() const { int r, s, c; if (kAccessesPerVector == 1) { /// One 128b aligned access fetching more than one element c = filter_c_[iteration_contiguous_]; r = filter_r_[iteration_contiguous_]; s = filter_s_[iteration_contiguous_]; } else { /// Multiple access to support non-128b alignment in contiguous dimension c = (filter_c_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements) % problem_size_.C; int wrap_c = (filter_c_[iteration_contiguous_] + iteration_vector_ * AccessType::kElements) / problem_size_.C; s = (filter_s_[iteration_contiguous_] + wrap_c) % problem_size_.S; int wrap_s = (filter_s_[iteration_contiguous_] + wrap_c) / problem_size_.S; r = filter_r_[iteration_contiguous_] + wrap_s; } if (problem_size_.mode == Mode::kConvolution) { r = (problem_size_.R - 1 - r); s = (problem_size_.S - 1 - s); } int n = offset_npq_[iteration_strided_] / (problem_size_.P * problem_size_.Q); int residual = offset_npq_[iteration_strided_] % (problem_size_.P * problem_size_.Q); int p = residual / problem_size_.Q; int q = residual % problem_size_.Q; int h = p * problem_size_.stride_h - problem_size_.pad_h + r * problem_size_.dilation_h; int w = q * problem_size_.stride_w - problem_size_.pad_w + s * problem_size_.dilation_w; return TensorCoord(n, h, w, c); } /// Returns true if the current coordinate is within the activation tensor x CUTLASS_HOST_DEVICE bool valid() const { TensorCoord coord = at(); return coord.n() < problem_size_.N && coord.h() >= 0 && coord.h() < problem_size_.H && coord.w() >= 0 && coord.w() < problem_size_.W; } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_HOST_DEVICE AccessType const *get() const { TensorCoord coord = at(); LongIndex offset = params_.layout(coord); return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8); } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv2dWgradActivationTileAccessIteratorAnalytic &operator++() { ++iteration_vector_; if (iteration_vector_ < kAccessesPerVector) { return *this; } iteration_vector_ = 0; ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; } iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. 
CUTLASS_HOST_DEVICE static Status can_implement(Conv2dProblemSize const &problem_size) { // check alignment constraint on iterator's contiguous dimension if (problem_size.C % AccessType::kElements) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_analytic.h/0
{ "file_path": "include/cutlass/conv/threadblock/conv2d_wgrad_activation_tile_access_iterator_analytic.h", "repo_id": "include", "token_count": 3446 }
21
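To make the analytic iterator's index arithmetic easier to follow, here is a standalone host-side restatement (an illustrative sketch, not library code; the struct and function names are invented) of what at() computes for the common cross-correlation case with one access per vector: the GEMM-B coordinate (gemm_k = npq, gemm_n = rsc) is unpacked and mapped to an (n, h, w, c) coordinate of the activation tensor. The kConvolution mode, which additionally mirrors r and s, is omitted here.

#include "cutlass/conv/conv2d_problem_size.h"

struct ActivationCoord {
  int n, h, w, c;
};

inline ActivationCoord wgrad_activation_coord(cutlass::conv::Conv2dProblemSize const &ps,
                                              int offset_npq, int offset_rsc) {
  // Unpack the contiguous GEMM dimension into filter coordinates (r, s, c).
  int r = offset_rsc / (ps.S * ps.C);
  int residual = offset_rsc % (ps.S * ps.C);
  int s = residual / ps.C;
  int c = residual % ps.C;

  // Unpack the strided GEMM dimension into output coordinates (n, p, q).
  int n = offset_npq / (ps.P * ps.Q);
  residual = offset_npq % (ps.P * ps.Q);
  int p = residual / ps.Q;
  int q = residual % ps.Q;

  // Map back into the input activation; the result is only usable when
  // n < N, 0 <= h < H and 0 <= w < W, which is what valid() checks.
  int h = p * ps.stride_h - ps.pad_h + r * ps.dilation_h;
  int w = q * ps.stride_w - ps.pad_w + s * ps.dilation_w;
  return {n, h, w, c};
}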
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile) matrix from memory. This iterator assumes TensorNDHWC layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv3d_problem_size.h" #include "cutlass/conv/threadblock/conv3d_params.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename Element_, typename ThreadMap_ > class Conv3dWgradOutputGradientTileAccessIteratorOptimized { public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = layout::TensorNDHWC; using ThreadMap = ThreadMap_; using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; using TensorRef = cutlass::TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized; static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided; static int const kConvDim = 3; using ConvProblemSize = typename conv::Conv3dProblemSize; static int const kAccessesPerVector = 1; static_assert(sizeof_bits<Element>::value >= 8, "WGRAD requires elements of size 8b or greater."); // // Parameters structure // struct Params : Conv3dWgradOutputGradientIteratorOptimizedParams { // // Methods // CUTLASS_HOST_DEVICE Params() {} CUTLASS_HOST_DEVICE Params(Conv3dWgradOutputGradientIteratorOptimizedParams const &base) : Conv3dWgradOutputGradientIteratorOptimizedParams(base) {} CUTLASS_HOST_DEVICE Params(Conv3dProblemSize const &problem_size, Layout const &layout) : Conv3dWgradOutputGradientIteratorOptimizedParams( problem_size, layout, sizeof_bits<Element>::value, {Shape::kRow, Shape::kColumn}, ThreadMap::kThreads, ThreadMap::kElementsPerAccess, {ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided}, {ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided}) {} }; private: Params const &params_; Conv3dProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; char const *pointer_; uint32_t predicates_; int filter_k_; int offset_nzpq_; public: CUTLASS_HOST_DEVICE Conv3dWgradOutputGradientTileAccessIteratorOptimized( Params const &params, Conv3dProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() ): params_(params), problem_size_(problem_size), pointer_(reinterpret_cast<char const *>(ptr)), predicates_(0), filter_k_(0), offset_nzpq_(0) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); filter_k_ = threadblock_offset.row() + thread_coord.contiguous(); offset_nzpq_ = threadblock_offset.column() + thread_coord.strided(); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int filter_k = filter_k_ + c * ThreadMap::Delta::kContiguous; int offset_nzpq = offset_nzpq_ + s * ThreadMap::Delta::kStrided; bool predicate = valid_(at_(offset_nzpq, filter_k)); uint32_t pred = (predicate ? 
1u : 0); int pred_idx = c + s * ThreadMap::Iterations::kContiguous; predicates_ |= (pred << pred_idx); } } // Offset pointer to (iteration_strided_, iteration_contiguous_) = (0, 0) pointer_ += ( offset_nzpq_ * params.layout.stride()[0] + filter_k_ ) * sizeof_bits<Element>::value / 8; set_iteration_index(0); } CUTLASS_HOST_DEVICE static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) { return Params(problem_size, layout); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; iteration_strided_ = index / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_HOST_DEVICE void advance() { // moves to the next GEMM-K offset (offset_nzpq_) in GEMM-A by a CTA-K tile offset_nzpq_ += Shape::kColumn * problem_size_.split_k_slices; // Clear predicates if needed CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { if (offset_nzpq_ + s * ThreadMap::Delta::kStrided >= params_.NZPQ) { uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous); predicates_ = (predicates_ & (~kClearMask)); } } pointer_ += params_.inc_next_nzpq; } private: /// Returns the coordinate in the output gradient tensor Dy that is (offset_nzpq, k) pointed to /// by the iterator. CUTLASS_HOST_DEVICE TensorCoord at_(int offset_nzpq, int k) const { // The subsequent fast_divmod() operations are equivalent to the following logical computation: // // // int nzpq = offset_nzpq_; // int n = nzpq / (problem_size_.Z * problem_size_.P * problem_size_.Q); // int residual = nzpq % (problem_size_.Z * problem_size_.P * problem_size_.Q); // // int z = residual / (problem_size_.P * problem_size_.Q); // residual = residual % (problem_size_.P * problem_size_.Q); // // int p = residual / problem_size_.Q; // int q = residual % problem_size_.Q; int residual, n, z, p, q; fast_divmod(n, residual, offset_nzpq, params_.ZPQ, params_.zpq_mul, params_.zpq_shr); fast_divmod(z, residual, residual, params_.PQ, params_.pq_mul, params_.pq_shr); fast_divmod(p, q, residual, problem_size_.Q, params_.q_mul, params_.q_shr); return TensorCoord(n, z, p, q, k); } /// Returns true if the coord is within the output gradient tensor Dy CUTLASS_HOST_DEVICE bool valid_(TensorCoord coord) const { return coord.n() < problem_size_.N && coord.c() < problem_size_.K; } public: /// Returns true if the current coordinate is within the output gradient tensor Dy CUTLASS_HOST_DEVICE bool valid() const { LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous; return (predicates_ & (1u << pred_idx)); } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_HOST_DEVICE AccessType const *get() const { return reinterpret_cast<AccessType const *>( pointer_ + iteration_strided_ * params_.offset_next_strided + iteration_contiguous_ * params_.offset_next_contiguous ); } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv3dWgradOutputGradientTileAccessIteratorOptimized &operator++() { ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; }
iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. CUTLASS_HOST_DEVICE static Status can_implement(Conv3dProblemSize const &problem_size) { // check alignment constraint on iterator's contiguous dimension if (problem_size.K % (128/sizeof_bits<Element>::value)) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_optimized.h/0
{ "file_path": "include/cutlass/conv/threadblock/conv3d_wgrad_output_gradient_tile_access_iterator_optimized.h", "repo_id": "include", "token_count": 3686 }
22
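The iterator above folds its bounds checks into a per-thread predicate mask and recovers (n, z, p, q) from a linear GEMM-K offset with fast_divmod. As a hedged illustration only, the standalone host program below re-derives that index decomposition and the 128-bit alignment rule enforced by can_implement(); the tensor extents and the 16-bit element width are made-up values, and none of this code appears in the header itself.

#include <cassert>

int main() {
  // Hypothetical output-gradient (Dy) extents for a 3-D wgrad problem.
  int N = 2, Z = 4, P = 8, Q = 8, K = 64;

  // can_implement(): K must be divisible by the 128-bit access width.
  // For a 16-bit element that is 128 / 16 = 8 elements per access.
  int elements_per_128b_access = 128 / 16;
  assert(K % elements_per_128b_access == 0);

  // at_(): recover (n, z, p, q) from a linear nzpq offset, the same
  // arithmetic the iterator performs on device with fast_divmod.
  int offset_nzpq = 433;
  int n = offset_nzpq / (Z * P * Q);
  int residual = offset_nzpq % (Z * P * Q);
  int z = residual / (P * Q);
  residual %= (P * Q);
  int p = residual / Q;
  int q = residual % Q;
  assert(n == 1 && z == 2 && p == 6 && q == 1);
  return 0;
}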
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing warp-level matrix multiply-accumulate operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma.h" #include "cutlass/gemm/thread/mma.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/thread/depthwise_mma.h" #include "cutlass/gemm/warp/mma_simt_tile_iterator.h" #include "cutlass/gemm/warp/mma_simt_policy.h" #include "cutlass/gemm/warp/mma_simt.h" #include "cutlass/conv/warp/mma_depthwise_simt_tile_iterator.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Number of partitions along K dimension int PartitionsK = 1, /// Complex transformation on operand A ComplexTransform TransformA = ComplexTransform::kNone, /// Complex transformation on operand B ComplexTransform TransformB = ComplexTransform::kNone, /// Used for partial specialization typename Enable = bool> class MmaDepthwiseSimt : public cutlass::gemm::warp:: MmaSimt<Shape_, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, Policy_> { using Base = cutlass::gemm::warp:: MmaSimt<Shape_, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, Policy_>; public: /// Shape of warp-level matrix operation (concept: GemmShape) using Shape = Shape_; /// Data type of multiplicand A using ElementA = ElementA_; /// Layout of multiplicand A using LayoutA = LayoutA_; /// Data type of multiplicand B using ElementB = ElementB_; /// Layout of multiplicand B using LayoutB = LayoutB_; /// Data type of accumulator matrix C using ElementC = ElementC_; /// Layout of accumulator matrix C using LayoutC = LayoutC_; /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) using Policy = Policy_; /// Indicates class of matrix operator using OperatorClass = arch::OpClassSimt; /// Hard-coded for now using ArchTag = arch::Sm50; /// Complex transform on A operand static ComplexTransform const kTransformA = TransformA; /// Complex transform on B operand static ComplexTransform const kTransformB = TransformB; public: /// Iterates over the B operand in memory using IteratorB = cutlass::conv::warp::DepthwiseMmaSimtTileIterator< MatrixShape<Policy::LaneMmaShape::kK, Shape::kN>, cutlass::gemm::Operand::kB, ElementB, LayoutB, Policy, PartitionsK, Shape::kK >; /// Storage for B tile using FragmentB = typename IteratorB::Fragment; /// Storage for transformed A tile using TransformedFragmentB = FragmentB; public: // // Methods // /// Ctor CUTLASS_DEVICE MmaDepthwiseSimt():Base() {} }; /// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width> typename FilterShape_, /// Shape of the output tile computed by thread- concept: conv::TensorNHWCShape<> typename ThreadOutputShape_, /// Shape of the output tile computed by threadblock - concept: conv::TensorNHWCShape<> typename ThreadBlockOutputShape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Shape of the warp in units of thread (concept: MmaSimtPolicy) typename Policy_, /// Iterator algo type conv::IteratorAlgorithm IteratorAlgorithm_ = IteratorAlgorithm::kAnalytic, /// Stride ( MatrixShape<Height, Width> ) typename StrideShape_ = cutlass::MatrixShape<-1, -1>, /// Dilation ( MatrixShape<Height, Width> ) typename DilationShape_ = cutlass::MatrixShape<-1, -1>, /// Activation Shape loaded by threadblock typename ActivationShape_ = cutlass::conv::TensorNHWCShape<-1,-1,-1,-1>, /// Number of partitions along K dimension int PartitionsK = 1, /// Complex transformation on operand A ComplexTransform TransformA = ComplexTransform::kNone, /// Complex transformation on operand B ComplexTransform TransformB = ComplexTransform::kNone, /// Used for partial specialization typename Enable = bool> class MmaDepthwiseDirectConvSimt { public: /// Shape of warp-level matrix operation (concept: GemmShape) using Shape = Shape_; /// Shape of filter shape per threadblock - concept: gemm::GemmShape<Depth, Height, Width> using FilterShape = FilterShape_; /// Shape of the output tile computed by thread- concept: conv::TensorNHWCShape<> using ThreadOutputShape = ThreadOutputShape_; /// Shape of the output tile computed by threadblock - concept: conv::TensorNHWCShape<> using ThreadBlockOutputShape = ThreadBlockOutputShape_; /// Data type of multiplicand A using ElementA = ElementA_; /// Layout of multiplicand A using LayoutA = LayoutA_; /// Data type of multiplicand B using ElementB = ElementB_; /// Layout of multiplicand B using LayoutB = LayoutB_; /// Data type of accumulator matrix C using ElementC = ElementC_; /// Layout of accumulator matrix C using LayoutC = LayoutC_; /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) using Policy = Policy_; /// Iterator algo type static conv::IteratorAlgorithm const IteratorAlgorithm = IteratorAlgorithm_; /// Stride ( MatrixShape<Height, Width> ) using StrideShape = StrideShape_; /// Dilation ( MatrixShape<Height, Width> ) using DilationShape = DilationShape_; /// Activation Shape loaded by threadblock using ActivationShape = ActivationShape_; /// Indicates class of matrix operator using OperatorClass = arch::OpClassSimt; /// Hard-coded for now using ArchTag = arch::Sm50; /// Complex transform on A operand static ComplexTransform const kTransformA = TransformA; /// Complex transform on B operand static ComplexTransform const kTransformB = TransformB; static constexpr bool use_dp4a = (platform::is_same< layout::ColumnMajorInterleaved<4>, LayoutA>::value || platform::is_same< layout::RowMajorInterleaved<4>, LayoutA >::value) && platform::is_same< ElementA, int8_t >::value && platform::is_same< ElementB, int8_t >::value; using dp4a_type = typename platform::conditional< use_dp4a , 
int8_t, bool >::type; /// Thread-level matrix multiply accumulate operator using ThreadMma = cutlass::conv::thread::DepthwiseDirectConvElementwiseInnerProduct< cutlass::gemm::GemmShape< Shape::kM / Policy::WarpShape::kRow, // number of output pixels processed per thread Shape::kN / Policy::WarpShape::kColumn, // number of channels processed per thread 1>, ElementA, ElementB, ElementC, arch::OpMultiplyAdd, dp4a_type >; /// Underlying matrix multiply operator (concept: arch::Mma) using ArchMmaOperator = typename ThreadMma::ArchMmaOperator; /// Indicates math operator using MathOperator = typename ArchMmaOperator::Operator; /// Shape of the underlying instruction using InstructionShape = cutlass::gemm::GemmShape<1,1,use_dp4a ? 4 : 1>; public: /// Iterates over the A operand in memory using IteratorA = cutlass::conv::warp::DepthwiseDirect2dConvSimtTileIterator< MatrixShape<Shape::kM, Shape::kN>, // <output tile=(P*Q), output channels> per warp FilterShape, ThreadOutputShape, ThreadBlockOutputShape, cutlass::gemm::Operand::kA, ElementA, Policy, IteratorAlgorithm, StrideShape, DilationShape, ActivationShape, PartitionsK, Shape::kK >; /// Storage for A tile using FragmentA = typename IteratorA::Fragment; /// Storage for transformed A tile using TransformedFragmentA = FragmentA; /// Iterates over the B operand in memory using IteratorB = cutlass::gemm::warp::MmaSimtTileIterator< MatrixShape<1, Shape::kN>, cutlass::gemm::Operand::kB, ElementB, LayoutB, Policy, PartitionsK, Shape::kK >; /// Storage for B tile using FragmentB = typename IteratorB::Fragment; /// Storage for transformed B tile using TransformedFragmentB = FragmentB; /// Iterates over the C operand in memory using IteratorC = cutlass::gemm::warp::MmaSimtTileIterator< MatrixShape<Shape::kM, Shape::kN>, cutlass::gemm::Operand::kC, ElementC, LayoutC, Policy >; /// Storage for C tile using FragmentC = typename ThreadMma::FragmentC; public: // // Methods // /// Ctor CUTLASS_DEVICE MmaDepthwiseDirectConvSimt() {} /// Performs a warp-level matrix multiply-accumulate operation CUTLASS_DEVICE void operator()( FragmentC &d, FragmentA a, FragmentB b, FragmentC const &c, int group_idx = 0) const { ThreadMma mma; mma(d, a, b, c); } /// Transform the mma operands to the required types CUTLASS_DEVICE void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, FragmentA const &A, FragmentB const &B) const { dst_A = A; dst_B = B; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace conv } // namespace cutlass
include/cutlass/conv/warp/mma_depthwise_simt.h/0
{ "file_path": "include/cutlass/conv/warp/mma_depthwise_simt.h", "repo_id": "include", "token_count": 4004 }
23
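MmaDepthwiseDirectConvSimt above selects a dp4a inner product whenever both operands are int8 and the A layout is interleaved by four elements, which widens the per-instruction K extent from 1 to 4. The compile-time sketch below only mirrors that selection predicate under an assumed operand configuration; the layout and element choices are illustrative and are not taken from the header.

#include <cstdint>
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"

// Assumed operand configuration: int8 activations and filters, row-major interleaved by 4.
using ElementA = int8_t;
using ElementB = int8_t;
using LayoutA  = cutlass::layout::RowMajorInterleaved<4>;

// Same condition the warp-level class evaluates to choose between dp4a and scalar FMA.
constexpr bool use_dp4a =
    (cutlass::platform::is_same<cutlass::layout::ColumnMajorInterleaved<4>, LayoutA>::value ||
     cutlass::platform::is_same<cutlass::layout::RowMajorInterleaved<4>, LayoutA>::value) &&
    cutlass::platform::is_same<ElementA, int8_t>::value &&
    cutlass::platform::is_same<ElementB, int8_t>::value;

static_assert(use_dp4a, "int8 operands with an interleaved-4 layout take the dp4a path");
static_assert((use_dp4a ? 4 : 1) == 4, "so each instruction consumes K = 4");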
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing conversion operations used by epilogues. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Converts the result without other operations /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation typename ElementAccumulator_ = ElementOutput_, ///< Accumulator data type FloatRoundStyle Round = FloatRoundStyle::round_to_nearest > class Convert { public: using ElementOutput = ElementOutput_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementAccumulator_; static int const kCount = Count; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using ComputeFragment = FragmentAccumulator; static FloatRoundStyle const kRound = Round; static bool const kIsHeavy = false; /// Host-constructable parameters structure struct Params { // // Methods // CUTLASS_HOST_DEVICE Params() {} }; public: /// Constructs the function object, possibly loading from pointers in host memory CUTLASS_HOST_DEVICE Convert(Params const &params = Params()) { } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { } /// Returns true if source is needed based on state of runtime arguments CUTLASS_HOST_DEVICE constexpr bool is_source_needed() const { return false; } /// Constexpr function to enable the compiler to optimize away the source loading if it is /// never needed. CUTLASS_HOST_DEVICE constexpr bool is_source_ever_needed() const { return false; } /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( FragmentAccumulator const &accumulator, FragmentOutput const &source = FragmentOutput(), ElementCompute uniform = ElementCompute(0)) const { // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementAccumulator, kCount, Round> destination_converter; return destination_converter(accumulator); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass
include/cutlass/epilogue/thread/conversion_op.h/0
{ "file_path": "include/cutlass/epilogue/thread/conversion_op.h", "repo_id": "include", "token_count": 1313 }
24
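The Convert functor above ignores alpha/beta scaling and the source fragment entirely; it only runs a NumericArrayConverter over the accumulator fragment. The host-side usage sketch below is illustrative: the float-to-half conversion, the fragment width of 4, and the test values are arbitrary choices, not defaults implied by the header.

#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/epilogue/thread/conversion_op.h"

int main() {
  // Convert float accumulators to half_t outputs, 4 elements per operation.
  using ConvertOp = cutlass::epilogue::thread::Convert<cutlass::half_t, 4, float>;
  ConvertOp convert_op;   // default-constructed Params; the functor is stateless

  cutlass::Array<float, 4> accum;
  for (int i = 0; i < 4; ++i) {
    accum[i] = 0.25f * float(i);
  }

  // is_source_needed() is false, so no source fragment has to be supplied.
  cutlass::Array<cutlass::half_t, 4> out = convert_op(accum);

  return (out[3] == cutlass::half_t(0.75f)) ? 0 : 1;
}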
/*************************************************************************************************** * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default configuration for epilogue computing absolute maximum of output and auxiliary outputs. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/threadblock/epilogue_with_absmax.h" #include "cutlass/layout/permute.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for absolute-maximum-computing epilogues with TensorOps template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename ElementOutput, typename ElementAuxOutput, typename ElementVector, typename OutputOp, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute > struct DefaultEpilogueWithAbsMax { /// Use defaults related to the existing epilogue using Base = DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, ElementsPerAccess >; // // Stores the output // using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout >; // // Stores the auxiliary output // using AuxOutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< typename Base::OutputTileThreadMap, ElementAuxOutput, ScatterD, PermuteDLayout >; /// Define the epilogue using Epilogue = EpilogueWithAbsMax< Shape, WarpMmaTensorOp, PartitionsK, OutputTileIterator, AuxOutputTileIterator, ElementVector, typename Base::AccumulatorFragmentIterator, typename Base::WarpTileIterator, typename Base::SharedLoadIterator, OutputOp, typename Base::Padding, Base::kFragmentsPerIteration >; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
include/cutlass/epilogue/threadblock/default_epilogue_with_absmax.h/0
{ "file_path": "include/cutlass/epilogue/threadblock/default_epilogue_with_absmax.h", "repo_id": "include", "token_count": 1219 }
25
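DefaultEpilogueWithAbsMax mostly wires an extra auxiliary-output tile iterator into the existing tensor-op epilogue defaults; the distinguishing work is the absolute-maximum reduction carried out by the output op. The snippet below is a conceptual, standalone sketch of what such an amax amounts to (commonly used to derive scale factors for narrow output types such as FP8); it is not the CUTLASS implementation, and the function name is invented.

#include <algorithm>
#include <cmath>
#include <vector>

// Conceptual stand-in: reduce the absolute maximum over an output tile.
// In the real epilogue this runs fragment-by-fragment alongside the store of D
// and the auxiliary output, and the partial maxima are combined across the grid.
float tile_absmax(std::vector<float> const &output_tile) {
  float amax = 0.0f;
  for (float x : output_tile) {
    amax = std::max(amax, std::fabs(x));
  }
  return amax;
}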
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMM/CONV to store accumulator in shared memory after applying scale, bias loaded from global memory and element-wise operations. This Epilogue is typically used in fused GEMM/CONV to stage the intermediate accumulator. 
*/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/functional.h" #include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_tensor_op.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator template < typename SmemTileIterator_, ///< Shared memory Tile iterator to output to shared memory typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename ScaleBiasIterator_, ///< Iterator to load scale and bias from global memory typename OutputOp_ ///< Output operator > class EpilogueSmemAccumulator { public: using SmemTileIterator = SmemTileIterator_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using ScaleBiasIterator = ScaleBiasIterator_; using OutputOp = OutputOp_; /// Fragment of accumulator tile using FragmentAccumulator = typename AccumulatorFragmentIterator::Fragment; /// The complete warp-level accumulator tile using AccumulatorTile = typename AccumulatorFragmentIterator::AccumulatorTile; /// Fragment of Scale and Bias loaded from global memory using FragmentScaleBias = typename ScaleBiasIterator::Fragment; static const bool PerChannelScale = (OutputOp::kScale == epilogue::thread::ScaleType::OnlyAlphaPerChannelScaling); /// Constructor CUTLASS_DEVICE EpilogueSmemAccumulator() {} /// Streams the result to shared memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator SmemTileIterator smem_iterator, ///< Tile iterator for destination in shared memory AccumulatorTile const &accumulator, ///< Complete warp-level accumulator tile ScaleBiasIterator scale_iterator, ///< iterator for scale vector in global memory ScaleBiasIterator bias_iterator) { ///< iterator for bias vector in global memory // Fragment to load scale bias from global memory FragmentScaleBias tb_frag_scale; FragmentScaleBias tb_frag_bias; /// Fragment Iterator to load slice of accumulator tile AccumulatorFragmentIterator frag_iterator_accum(accumulator); FragmentAccumulator tb_frag_accum; /// Epilogue output fragment typename SmemTileIterator::Fragment tb_frag_smem; /// Load scale and bias from global memory if(PerChannelScale) scale_iterator.load(tb_frag_scale); bias_iterator.load(tb_frag_bias); /// Iterate over the accumulator tile and store to shared memory CUTLASS_PRAGMA_UNROLL for (int rid = 0; rid < AccumulatorFragmentIterator::TileIterations::kRow; ++rid) { CUTLASS_PRAGMA_UNROLL for (int cid = 0; cid < AccumulatorFragmentIterator::TileIterations::kColumn; ++cid) { using AccumulatorAccessType = typename OutputOp::FragmentAccumulator; using ScaleBiasAccessType = typename OutputOp::FragmentScaleBias; using FragmentSmemAccessType = typename OutputOp::FragmentOutput; ScaleBiasAccessType const * scale_frag_ptr = reinterpret_cast<ScaleBiasAccessType const *>(&tb_frag_scale); ScaleBiasAccessType const * bias_frag_ptr = reinterpret_cast<ScaleBiasAccessType const *>(&tb_frag_bias); FragmentSmemAccessType * smem_frag_ptr = reinterpret_cast<FragmentSmemAccessType *>(&tb_frag_smem); CUTLASS_PRAGMA_UNROLL for 
(int idx = 0; idx < AccumulatorFragmentIterator::kIterationsPerTile; ++idx) { frag_iterator_accum.load(tb_frag_accum); ++frag_iterator_accum; AccumulatorAccessType const * accumulator_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&tb_frag_accum); const int kOutputIterations = FragmentAccumulator::kElements / OutputOp::kCount; CUTLASS_PRAGMA_UNROLL for (int it = 0; it < kOutputIterations; it++) { smem_frag_ptr[idx * kOutputIterations + it] = output_op(accumulator_frag_ptr[it], scale_frag_ptr[cid * kOutputIterations + it], bias_frag_ptr[cid * kOutputIterations + it]); } } smem_iterator.store(tb_frag_smem); ++smem_iterator; } } } /// Streams the result to shared memory CUTLASS_DEVICE void operator()( OutputOp const &output_op, ///< Output operator SmemTileIterator smem_iterator, ///< Tile iterator for destination in shared memory AccumulatorTile const &accumulator) { ///< Complete warp-level accumulator tile /// Fragment Iterator to load slice of accumulator tile AccumulatorFragmentIterator frag_iterator_accum(accumulator); FragmentAccumulator tb_frag_accum; /// Epilogue output fragment typename SmemTileIterator::Fragment tb_frag_smem; /// Iterate over the accumulator tile and store to shared memory CUTLASS_PRAGMA_UNROLL for (int rid = 0; rid < AccumulatorFragmentIterator::TileIterations::kRow; ++rid) { CUTLASS_PRAGMA_UNROLL for (int cid = 0; cid < AccumulatorFragmentIterator::TileIterations::kColumn; ++cid) { using AccumulatorAccessType = typename OutputOp::FragmentAccumulator; using FragmentSmemAccessType = typename OutputOp::FragmentOutput; FragmentSmemAccessType * smem_frag_ptr = reinterpret_cast<FragmentSmemAccessType *>(&tb_frag_smem); CUTLASS_PRAGMA_UNROLL for (int idx = 0; idx < AccumulatorFragmentIterator::kIterationsPerTile; ++idx) { frag_iterator_accum.load(tb_frag_accum); ++frag_iterator_accum; AccumulatorAccessType const * accumulator_frag_ptr = reinterpret_cast<AccumulatorAccessType const *>(&tb_frag_accum); const int kOutputIterations = FragmentAccumulator::kElements / OutputOp::kCount; CUTLASS_PRAGMA_UNROLL for (int it = 0; it < kOutputIterations; it++) { smem_frag_ptr[idx * kOutputIterations + it] = output_op(accumulator_frag_ptr[it]); } } smem_iterator.store(tb_frag_smem); ++smem_iterator; } } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
include/cutlass/epilogue/threadblock/epilogue_smem_accumulator.h/0
{ "file_path": "include/cutlass/epilogue/threadblock/epilogue_smem_accumulator.h", "repo_id": "include", "token_count": 3246 }
26
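For each fragment, the epilogue above evaluates output_op(accumulator, scale, bias) and writes the result to shared memory rather than global memory, so that a following fused stage can consume the intermediate tile. The loop below is only a scalar, host-side caricature of that per-channel scale-and-bias application; the layout and element counts are invented, the real code operates on vectorized fragments, and the output op may additionally apply an element-wise activation.

#include <cstddef>

// Scalar caricature of the fused epilogue's per-channel transform:
// staged[i] = scale[channel] * accum[i] + bias[channel].
void scale_bias_to_smem(float const *accum, float const *scale, float const *bias,
                        float *smem_tile, int rows, int channels) {
  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < channels; ++c) {
      std::size_t idx = static_cast<std::size_t>(r) * channels + c;
      smem_tile[idx] = scale[c] * accum[idx] + bias[c];
    }
  }
}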
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Metaprogram for determining the mapping of output elements to threads for epilogue tiles. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/tensor_ref.h" #include "cutlass/fast_math.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tuple defining point in output tile template < int Column, int Row, int Group, int Cluster, int Tile > struct OutputTileShape { static int const kColumn = Column; static int const kRow = Row; static int const kGroup = Group; static int const kCluster = Cluster; static int const kTile = Tile; static int const kCount = kColumn * kRow * kGroup * kCluster * kTile; }; //////////////////////////////////////////////////////////////////////////////// template <typename Iterations, typename Delta> struct OutputTileThreadMapHelpers { /// Determines the iteration index of a vector access according to the thread map CUTLASS_HOST_DEVICE static void iteration_index( int &column_idx, int &row_idx, int &group_idx, int &cluster_idx, int &tile_idx, int iter_idx) { column_idx = iter_idx % Iterations::kColumn; int residual = iter_idx / Iterations::kColumn; row_idx = residual % Iterations::kRow; residual = residual / Iterations::kRow; group_idx = residual % Iterations::kGroup; residual = residual / Iterations::kGroup; cluster_idx = residual % Iterations::kCluster; tile_idx = residual / Iterations::kCluster; } /// Computes the offset of a given vector access CUTLASS_HOST_DEVICE static MatrixCoord iteration_offset(int iter_idx) { int column_idx; int row_idx; int group_idx; int cluster_idx; int tile_idx; iteration_index(column_idx, row_idx, group_idx, cluster_idx, tile_idx, iter_idx); return MatrixCoord( row_idx * Delta::kRow + group_idx * Delta::kGroup + cluster_idx * Delta::kCluster + tile_idx * Delta::kTile, column_idx * Delta::kColumn); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ThreadMap_, typename Shape_, typename Iterations_, typename Delta_, typename Count_ > struct OutputTileThreadMap : public OutputTileThreadMapHelpers<Iterations_, Delta_> { /// Conventional thread map (concept: ThreadMap) using ThreadMap = ThreadMap_; /// Number of threads participating in the operation static int const kThreads = ThreadMap::kThreads; /// Number of scalar elements per access static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; /// Shape of the tile using Shape = Shape_; /// Iterations performed by each thread using Iterations = Iterations_; /// Delta between accesses using Delta = Delta_; /// Number of iterator iterations using Count = Count_; /// Initial offset function CUTLASS_HOST_DEVICE static MatrixCoord initial_offset(int thread_idx) { using Index = typename layout::PitchLinearCoord::Index; layout::PitchLinearCoord coord = ThreadMap::initial_offset(thread_idx); Index cluster = coord.strided() / (Shape::kGroup * Shape::kRow); Index cluster_residual = coord.strided() % (Shape::kGroup * Shape::kRow); Index group = cluster_residual / (Shape::kRow); Index row = cluster_residual % (Shape::kRow); return MatrixCoord{ row + group * Shape::kRow * Count::kRow + cluster * Shape::kGroup * Count::kGroup * Shape::kRow * Count::kRow, coord.contiguous() }; } }; //////////////////////////////////////////////////////////////////////////////// namespace detail { /// 
RowArrangement determines how one or more warps cover a region of consecutive rows. template < typename Shape, int WarpsRemaining, int ElementsPerAccess, int ElementSize, bool Is2dTile > struct RowArrangement; /// RowArrangement in which each warp's access is a 1D tiled arrangement. template < typename Shape, int WarpsRemaining, int ElementsPerAccess, int ElementSize > struct RowArrangement<Shape, WarpsRemaining, ElementsPerAccess, ElementSize, false> { static int const kWarpSize = 32; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; static int const kIterationsRow = 1; static int const kDeltaRow = 1; static int const kIterationsColumn = Shape::kColumn / kElementsPerAccess / kWarpSize; static int const kDeltaColumn = kWarpSize * kElementsPerAccess; static int const kAccessWidth = kWarpSize; static int const kAccessRows = 1; static int const kWarpPartitionsRow = 1; static int const kWarpPartitionsColumn = WarpsRemaining; }; /// RowArrangement in which each warp's access is a 2D tiled arrangement. template < typename Shape, int WarpsRemaining, int ElementsPerAccess, int ElementSize > struct RowArrangement<Shape, WarpsRemaining, ElementsPerAccess, ElementSize, true> { static int const kMemoryAccessSize = 256; // Preferred access size static int const kWarpSize = 32; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; struct Detail { static int const kShapeRow = Shape::kRow / WarpsRemaining; static int const kShapeWidth = Shape::kColumn / kElementsPerAccess; static int const kTargetMemoryAccessWidth = kMemoryAccessSize / (kElementsPerAccess * kElementSize / 8); static int const kTargetAccessRows = kWarpSize / kTargetMemoryAccessWidth; }; static int const kAccessWidth = (Detail::kTargetAccessRows > Detail::kShapeRow ? kWarpSize / Detail::kShapeRow : const_min( Detail::kShapeWidth, const_min(kWarpSize, kMemoryAccessSize / (kElementsPerAccess * kElementSize / 8)) )); static int const kAccessRows = (Detail::kTargetAccessRows > Detail::kShapeRow ? 
Detail::kShapeRow : const_min(Shape::kRow, kWarpSize / kAccessWidth)); static int const kIterationsRow = Detail::kShapeRow / kAccessRows; static int const kDeltaRow = kAccessRows; static int const kIterationsColumn = Detail::kShapeWidth / kAccessWidth; static int const kDeltaColumn = kAccessWidth * kElementsPerAccess; static_assert( kAccessWidth * kElementsPerAccess <= Shape::kColumn, "Accessing too many elements per access"); static_assert( kIterationsColumn > 0, "Iteration Count Column must be > 0" ); static_assert( kIterationsRow > 0, "Iteration Count Row must be > 0" ); static int const kWarpPartitionsRow = 1; static int const kWarpPartitionsColumn = 1; }; } //////////////////////////////////////////////////////////////////////////////// /// Template metaprogram for partitioning a 4D space across warps to achieve several performance /// objectives: /// /// - coalesced memory accesses in units of 128 Byte lines /// - minimal address arithmetic /// - minimal predicate calculations /// template < typename Shape_, typename Count_, int Threads, int ElementsPerAccess, int ElementSize > struct OutputTileOptimalThreadMap { using Shape = Shape_; using Count = Count_; static int const kWarpSize = 32; static int const kThreads = Threads; static int const kWarpCount = kThreads / kWarpSize; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; // // Metaprogram computation // struct Detail { // Clusters static int const kIterationsCluster = ((Shape::kCluster > kWarpCount) ? Shape::kCluster / kWarpCount : 1); static int const kDeltaCluster = ((Shape::kCluster > kWarpCount) ? Shape::kRow * Count::kRow * Shape::kGroup * Count::kGroup * Shape::kCluster / kIterationsCluster : 1); static int const kCompactedDeltaCluster = ((Shape::kCluster > kWarpCount) ? Shape::kRow * Shape::kGroup * Shape::kCluster / kIterationsCluster : 1); static int const kWarpPartitionsCluster = ((Shape::kCluster > kWarpCount) ? kWarpCount : kWarpCount / Shape::kCluster); static int const kWarpsRemainingForGroups = ((Shape::kCluster > kWarpCount) ? 1 : kWarpCount / Shape::kCluster); // Groups static int const kIterationsGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? Shape::kGroup / kWarpsRemainingForGroups : 1); static int const kDeltaGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? Shape::kRow * Count::kRow * Shape::kGroup / kIterationsGroup : 1); static int const kCompactedDeltaGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? Shape::kRow * Shape::kGroup / kIterationsGroup : 1); static int const kWarpPartitionsGroup = ((Shape::kGroup > kWarpsRemainingForGroups) ? 1 : kWarpsRemainingForGroups / Shape::kGroup); static int const kWarpsRemainingForRows = ((Shape::kGroup > kWarpsRemainingForGroups) ? 
1 : kWarpsRemainingForGroups / Shape::kGroup); // Rows using RowArrangement = detail::RowArrangement< Shape, kWarpsRemainingForRows, kElementsPerAccess, kElementSize, (Shape::kRow > kWarpsRemainingForRows) >; // Warp partitions using WarpPartitions = OutputTileShape< RowArrangement::kWarpPartitionsColumn, RowArrangement::kWarpPartitionsRow, kWarpPartitionsGroup, kWarpPartitionsCluster, 1>; static int const kAccessWidth = RowArrangement::kAccessWidth; static int const kAccessRows = RowArrangement::kAccessRows; }; // // Output // using Iterations = OutputTileShape< Detail::RowArrangement::kIterationsColumn, Detail::RowArrangement::kIterationsRow, Detail::kIterationsGroup, Detail::kIterationsCluster, 1>; using Delta = OutputTileShape< Detail::RowArrangement::kDeltaColumn, Detail::RowArrangement::kDeltaRow, Detail::kDeltaGroup, Detail::kDeltaCluster, 1>; /// Initial offset function CUTLASS_DEVICE static MatrixCoord initial_offset(int thread_idx) { // int warp_idx = __shfl_sync(0xffffffff, thread_idx / kWarpSize, 0); int warp_idx = thread_idx / kWarpSize; int lane_idx = thread_idx % kWarpSize; // Compute warp location int cluster_idx = warp_idx / Detail::WarpPartitions::kCluster; int residual_cluster = warp_idx % Detail::WarpPartitions::kCluster; int group_idx = residual_cluster / Detail::WarpPartitions::kGroup; int residual_group = residual_cluster % Detail::WarpPartitions::kGroup; int row_idx = residual_group / Detail::WarpPartitions::kRow; int col_idx = residual_group % Detail::WarpPartitions::kRow; // Compute per-lane offset int lane_row_offset = lane_idx / Detail::kAccessWidth; int lane_col_offset = lane_idx % Detail::kAccessWidth; // Compute coordinate in output space int cluster_offset = cluster_idx * Shape::kRow * Count::kRow * Shape::kGroup * Count::kGroup; int group_offset = group_idx * Shape::kRow * Count::kRow; int row_offset = row_idx * Iterations::kRow * Detail::kAccessRows; int column_offset = col_idx * Iterations::kColumn * Detail::kAccessWidth * kElementsPerAccess; return MatrixCoord( cluster_offset + group_offset + row_offset + lane_row_offset, column_offset + lane_col_offset * kElementsPerAccess ); } /// Computes the offset of a given vector access CUTLASS_HOST_DEVICE static MatrixCoord iteration_offset(int iter_idx) { return OutputTileThreadMapHelpers<Iterations, Delta>::iteration_offset(iter_idx); } /// Compacted thread map in which the 4D region is contiguous struct CompactedThreadMap { using Shape = Shape_; using TileShape = MatrixShape< Shape::kTile * Shape::kCluster * Shape::kGroup * Shape::kRow, Shape::kColumn >; using Iterations = OutputTileShape< Detail::RowArrangement::kIterationsColumn, Detail::RowArrangement::kIterationsRow, Detail::kIterationsGroup, Detail::kIterationsCluster, 1>; using Delta = OutputTileShape< Detail::RowArrangement::kDeltaColumn, Detail::RowArrangement::kDeltaRow, Detail::kCompactedDeltaGroup, Detail::kCompactedDeltaCluster, 1>; /// Number of elements within each vector access static int const kElementsPerAccess = ElementsPerAccess; /// Number of threads static int const kThreads = Threads; /// Function to compute each thread's initial offset CUTLASS_DEVICE static MatrixCoord initial_offset(int thread_idx) { // int warp_idx = __shfl_sync(0xffffffff, thread_idx / kWarpSize, 0); int warp_idx = thread_idx / kWarpSize; int lane_idx = thread_idx % kWarpSize; // Compute warp location int cluster_idx = warp_idx / Detail::WarpPartitions::kCluster; int residual_cluster = warp_idx % Detail::WarpPartitions::kCluster; int group_idx = residual_cluster / 
Detail::WarpPartitions::kGroup; int residual_group = residual_cluster % Detail::WarpPartitions::kGroup; int row_idx = residual_group / Detail::WarpPartitions::kRow; int col_idx = residual_group % Detail::WarpPartitions::kRow; // Compute per-lane offset int lane_row_offset = lane_idx / Detail::kAccessWidth; int lane_col_offset = lane_idx % Detail::kAccessWidth; // Compute coordinate in output space int cluster_offset = cluster_idx * Shape::kRow * Shape::kGroup; int group_offset = group_idx * Shape::kRow; int row_offset = row_idx * Iterations::kRow * Detail::kAccessRows; int column_offset = col_idx * Iterations::kColumn * Detail::kAccessWidth * kElementsPerAccess; MatrixCoord coord( cluster_offset + group_offset + row_offset + lane_row_offset, column_offset + lane_col_offset * kElementsPerAccess ); return coord; } }; }; //////////////////////////////////////////////////////////////////////////////// /// Template metaprogram for partitioning a 3D interleaved layout across warps /// to achieve several performance objectives: /// /// - coalesced memory accesses in units of 64 Byte lines /// - minimal address arithmetic /// - minimal predicate calculations /// template <typename WarpCount_, typename Iterations_, int Threads, int ElementsPerAccess, int ElementSize> struct InterleavedOutputTileThreadMap { using WarpCount = WarpCount_; static int const kWarpSize = 32; static int const kThreads = Threads; static int const kWarpCount = kThreads / kWarpSize; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; // // Metaprogram computation // struct Detail {}; // // Output // using Iterations = Iterations_; using Delta = layout::PitchLinearShape<kWarpSize * kElementsPerAccess, 1>; /// Initial offset function CUTLASS_HOST_DEVICE static layout::PitchLinearCoord initial_offset(int thread_idx) { int warp_idx = thread_idx / kWarpSize; int lane_idx = thread_idx % kWarpSize; // Compute warp location layout::PitchLinearCoord warp_footprint{ Delta::kContiguous * Iterations::kContiguous, Delta::kStrided * Iterations::kStrided}; layout::PitchLinearCoord warp_offset{warp_idx % WarpCount::kContiguous, warp_idx / WarpCount::kContiguous}; // Compute per-lane offset layout::PitchLinearCoord thread_offset_in_warp{ lane_idx * kElementsPerAccess, 0}; layout::PitchLinearCoord thread_offset_in_threadblock_tile = warp_footprint * warp_offset + thread_offset_in_warp; return thread_offset_in_threadblock_tile; } }; //////////////////////////////////////////////////////////////////////////////// /// Template metaprogram for partitioning a 4D interleaved layout across warps /// to achieve several performance objectives: /// /// - coalesced memory accesses in units of 64 Byte lines /// - minimal address arithmetic /// - minimal predicate calculations /// template <typename WarpCount_, typename Iterations_, int Threads, int ElementsPerAccess, int ElementSize> struct InterleavedConvOutputTileThreadMap { using WarpCount = WarpCount_; static int const kWarpSize = 32; static int const kThreads = Threads; static int const kWarpCount = kThreads / kWarpSize; static int const kElementsPerAccess = ElementsPerAccess; static int const kElementSize = ElementSize; // // Metaprogram computation // struct Detail {}; // // Output // using Iterations = Iterations_; using Delta = MatrixShape<kWarpSize / 4, 4 * kElementsPerAccess>; /// Initial offset function CUTLASS_HOST_DEVICE static MatrixCoord initial_offset(int thread_idx) { int warp_idx = thread_idx / kWarpSize; int lane_idx = thread_idx % 
kWarpSize; // Compute warp location MatrixCoord warp_footprint{ Delta::kRow * Iterations::kRow, Delta::kColumn * Iterations::kColumn, }; MatrixCoord warp_offset{warp_idx % WarpCount::kRow, warp_idx / WarpCount::kRow}; // Compute per-lane offset MatrixCoord thread_offset_in_warp{lane_idx / 4, (lane_idx % 4) * kElementsPerAccess}; MatrixCoord thread_offset_in_threadblock_tile = warp_footprint * warp_offset + thread_offset_in_warp; return thread_offset_in_threadblock_tile; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass
include/cutlass/epilogue/threadblock/output_tile_thread_map.h/0
{ "file_path": "include/cutlass/epilogue/threadblock/output_tile_thread_map.h", "repo_id": "include", "token_count": 6655 }
27
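OutputTileThreadMapHelpers above decomposes a linear iteration index into column/row/group/cluster/tile components and scales them by the Delta strides. The host-side sketch below exercises iteration_offset() with made-up Iterations and Delta shapes to show the mapping; the shape values are illustrative and do not come from any particular kernel configuration.

#include "cutlass/matrix_coord.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"

int main() {
  using namespace cutlass::epilogue::threadblock;

  // OutputTileShape arguments are <Column, Row, Group, Cluster, Tile>.
  using Iterations = OutputTileShape<2, 4, 1, 1, 1>;   // 2 column x 4 row accesses
  using Delta      = OutputTileShape<32, 8, 1, 1, 1>;  // strides between accesses
  using Helpers    = OutputTileThreadMapHelpers<Iterations, Delta>;

  // Iteration 5 decomposes to column_idx = 5 % 2 = 1 and row_idx = (5 / 2) % 4 = 2,
  // giving an offset of (2 * 8, 1 * 32) = (16, 32).
  cutlass::MatrixCoord offset = Helpers::iteration_offset(5);
  return (offset.row() == 16 && offset.column() == 32) ? 0 : 1;
}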
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/arch/mma.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/detail/layout.hpp" #include "cutlass/detail/collective.hpp" #include "cutlass/detail/dependent_false.hpp" #include "cute/atom/mma_traits_sm90_gmma.hpp" #include "cute/atom/copy_traits_sm90_tma.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::collective { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { // // Some named constants // constexpr int tma_alignment_bytes = 16; constexpr int cp_async_min_alignment_bytes = 4; constexpr int sm90_smem_capacity_bytes = 232448; // Maps 2.x A matrix layout tag to respective GMMA major mode enum template <class ElementA, class LayoutA> constexpr cute::GMMA::Major gmma_ss_tag_to_major_A() { // MN major mode is only valid for non-TF32, non-int and non-fp8 MMAs if constexpr (cutlass::gemm::detail::is_mn_major_A<LayoutA>() && not cute::is_same_v<ElementA, tfloat32_t> && sizeof(ElementA) != 1) { return cute::GMMA::Major::MN; } else { return cute::GMMA::Major::K; } } // Maps 2.x B matrix layout tag to respective GMMA major mode enum template <class ElementB, class LayoutB> constexpr cute::GMMA::Major gmma_ss_tag_to_major_B() { // MN major mode is only valid for non-TF32, non-int and non-fp8 MMAs if constexpr (cutlass::gemm::detail::is_mn_major_B<LayoutB>() && not cute::is_same_v<ElementB, tfloat32_t> && sizeof(ElementB) != 1) { return cute::GMMA::Major::MN; } else { return cute::GMMA::Major::K; } } template <class LayoutA> constexpr cute::GMMA::Major gmma_rs_tag_to_major_A() { // MN major mode is only valid for 
non-TF32 and non-int MMAs if constexpr (cutlass::gemm::detail::is_mn_major_A<LayoutA>()) { return cute::GMMA::Major::MN; } else { return cute::GMMA::Major::K; } } template <class LayoutB> constexpr cute::GMMA::Major gmma_rs_tag_to_major_B() { // MN major mode is only valid for non-TF32 and non-int MMAs if constexpr (cutlass::gemm::detail::is_mn_major_B<LayoutB>()) { return cute::GMMA::Major::MN; } else { return cute::GMMA::Major::K; } } // Maps a rank-1 cute::Shape<> representing the cluster shape on to the TMA atom that should be used with it template <class UnimodalClusterShape> constexpr auto sm90_cluster_shape_to_tma_atom(UnimodalClusterShape) { static_assert(cute::rank(UnimodalClusterShape{}) == 1, "Use this function to figure out TMA for each mode individually."); if constexpr (cute::size(UnimodalClusterShape{}) == 1) { return cute::SM90_TMA_LOAD{}; } else { return cute::SM90_TMA_LOAD_MULTICAST{}; } } // Generates the most efficient possible TiledCopy with cp.async copy atom given a set of parameters. template<int ThreadCount, class Element, int Alignment, class StrideType, class TileMN, class TileK> constexpr auto make_cp_async_gmem_tiled_copy() { using namespace cute; using AlignmentType = cute::uint_byte_t<static_cast<int>(sizeof(Element)) * Alignment>; constexpr int TileSizeMN = cute::size(TileMN{}); constexpr int TileSizeK = cute::size(TileK{}); // Maximize the number of threads along the gmem major mode to promote coalesced reads // While making sure our thread layout tiles the threadblock tile evenly if constexpr (cutlass::gemm::detail::is_k_major<StrideType>()) { // K major thread layout for K major gmem constexpr int threads_major = TileSizeK / Alignment; constexpr int threads_minor = ThreadCount / threads_major; static_assert(threads_major > 0); static_assert(ThreadCount % threads_major == 0); static_assert(threads_minor == 0 || (TileSizeMN % threads_minor == 0)); return make_tiled_copy( Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentType>, Element>{}, Layout<Shape <Int<threads_minor>,Int<threads_major>>, Stride<Int<threads_major>, _1>>{}, Layout<Shape<_1,Int<Alignment>>>{}); } else if constexpr (cutlass::gemm::detail::is_mn_major<StrideType>()) { // MN major thread layout for MN major gmem constexpr int threads_major = TileSizeMN / Alignment; constexpr int threads_minor = ThreadCount / threads_major; static_assert(threads_major > 0); static_assert(ThreadCount % threads_major == 0); static_assert(threads_minor == 0 || (TileSizeK % threads_minor == 0)); return make_tiled_copy( Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentType>, Element>{}, Layout<Shape <Int<threads_major>,Int<threads_minor>>, Stride< _1,Int<threads_major>>>{}, Layout<Shape<Int<Alignment>,_1>>{}); } else { static_assert(cute::is_void_v<Element>, "Unsupported gmem layout for automatic gmem tiled copy builder."); } } // Helper for SS GMMA smem selection that considers a tensor TileShape: // (BLK_MN, BLK_K) // or hierarchically // ((BLK_MN0,BLK_MN1,...),(BLK_K0,BLK_K1,...)) // and returns the optimal GMMA::Layout that fits BLK_MN0 and BLK_K0 template <cute::GMMA::Major major, class ElementType, class BLK_MN, class BLK_K, const bool is_ws_transposed_B = false> constexpr auto rs_smem_selector() { using namespace cute; auto BLK_MN0 = size<0>(BLK_MN{}); auto BLK_K0 = size<0>(BLK_K{}); static_assert(BLK_MN0 % 8 == 0, "BLK_MN0 must be a multiple of 8."); static_assert(BLK_K0 % 8 == 0, "BLK_K0 must be a multiple of 8."); if constexpr (major == GMMA::Major::MN) { if constexpr (sizeof(ElementType) == 4){ if constexpr 
(is_ws_transposed_B) { // only optimized transpositionB(SW32 and SW128 for tf32) can be used, but prefer SW32 due to free bank conflict if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW32_Atom<ElementType>{}; } else { static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0, "BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{})"); } } else { // Fall into SW32 due to free bank conflict if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW32_Atom<ElementType>{}; } else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_INTER_Atom<ElementType>{}; } else { static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0, "BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{})"); } } } // Used for int8, fp8, fp16 and bf16 I/O kernels else if constexpr (sizeof(ElementType) == 1 || sizeof(ElementType) == 2) { if constexpr (sizeof(ElementType) == 1 && is_ws_transposed_B) { // Only optimized transpositionB (SW32 for int8 and fp8) can be used if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW128_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW128_Atom<ElementType>{}; } else { static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_SW128_Atom<ElementType>{}) == 0, "BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_128_Atom<ElementType>{})"); } } else { if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW128_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW128_Atom<ElementType>{}; } else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW64_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW64_Atom<ElementType>{}; } else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW32_Atom<ElementType>{}; } else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_INTER_Atom<ElementType>{}; } else { static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0, "BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{})"); } } } else { static_assert(cutlass::detail::dependent_false<ElementType>, "Smem selector does not support this element type"); } } else if constexpr (major == GMMA::Major::K) { if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW128_Atom<ElementType>{}) == 0) { return GMMA::Layout_K_SW128_Atom<ElementType>{}; } else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW64_Atom<ElementType>{}) == 0) { return GMMA::Layout_K_SW64_Atom<ElementType>{}; } else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW32_Atom<ElementType>{}) == 0) { return GMMA::Layout_K_SW32_Atom<ElementType>{}; } else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{}) == 0) { return GMMA::Layout_K_INTER_Atom<ElementType>{}; } else { static_assert(BLK_K0 % size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{}) == 0, "BLK_K0 must be a multiple of size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{})"); } } } // Helper for SS GMMA smem selection that considers a tensor TileShape: // (BLK_MN, BLK_K) // or hierarchically // ((BLK_MN0,BLK_MN1,...),(BLK_K0,BLK_K1,...)) // and returns the largest GMMA::Layout that fits BLK_MN0 and BLK_K0 template <cute::GMMA::Major major, class ElementType, class BLK_MN, class BLK_K> CUTE_HOST_DEVICE constexpr auto ss_smem_selector() { using namespace cute; auto BLK_MN0 = size<0>(BLK_MN{}); 
auto BLK_K0 = size<0>(BLK_K{}); static_assert(BLK_MN0 % 8 == 0, "BLK_MN0 must be a multiple of 8."); static_assert(BLK_K0 % 8 == 0, "BLK_K0 must be a multiple of 8."); if constexpr (major == GMMA::Major::MN) { if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW128_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW128_Atom<ElementType>{}; } else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW64_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW64_Atom<ElementType>{}; } else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_SW32_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_SW32_Atom<ElementType>{}; } else if constexpr (BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0) { return GMMA::Layout_MN_INTER_Atom<ElementType>{}; } else { static_assert(BLK_MN0 % size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{}) == 0, "BLK_MN0 must be a multiple of size<0>(GMMA::Layout_MN_INTER_Atom<ElementType>{})"); } } else if constexpr (major == GMMA::Major::K) { if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW128_Atom<ElementType>{}) == 0) { return GMMA::Layout_K_SW128_Atom<ElementType>{}; } else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW64_Atom<ElementType>{}) == 0) { return GMMA::Layout_K_SW64_Atom<ElementType>{}; } else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_SW32_Atom<ElementType>{}) == 0) { return GMMA::Layout_K_SW32_Atom<ElementType>{}; } else if constexpr (BLK_K0 % size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{}) == 0) { return GMMA::Layout_K_INTER_Atom<ElementType>{}; } else { static_assert(BLK_K0 % size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{}) == 0, "BLK_K0 must be a multiple of size<1>(GMMA::Layout_K_INTER_Atom<ElementType>{})"); } } } template <class ElementA, class ElementB> constexpr bool is_input_size_two_bytes() { return (sizeof(ElementA) == 2 && sizeof(ElementB) == 2); } template <class ElementA, class ElementB> constexpr bool is_input_fp8() { return ((cute::is_same_v<ElementA, float_e4m3_t> || cute::is_same_v<ElementA, float_e5m2_t>) && (cute::is_same_v<ElementB, float_e4m3_t> || cute::is_same_v<ElementB, float_e5m2_t>)); } // We need to handle the tuples in this function since it is used in SFINAE dispatch in the CollectiveBuilder. // At that point, it is not guaranteed that the tuples have been split out into the required parts. 
template <class MaybeTupleElementA, class LayoutA, class MaybeTupleElementB, class LayoutB> constexpr bool is_use_rmem_A() { using ElementA = detail::deduce_mixed_width_dtype_t<0, MaybeTupleElementA>; using ElementB = detail::deduce_mixed_width_dtype_t<0, MaybeTupleElementB>; constexpr bool IsABDifferentWidth = cute::sizeof_bits_v<ElementA> != cute::sizeof_bits_v<ElementB>; constexpr bool HasScales = cute::is_tuple<MaybeTupleElementA>::value ^ cute::is_tuple<MaybeTupleElementB>::value; constexpr bool IsInputSizeTwoBytes = is_input_size_two_bytes<ElementA, ElementB>(); constexpr bool IsLayoutAkBk = cutlass::gemm::detail::is_k_major_A<LayoutA>() && cutlass::gemm::detail::is_k_major_B<LayoutB>(); constexpr bool IsUseRmemA = (!IsInputSizeTwoBytes && !IsLayoutAkBk) || IsABDifferentWidth || HasScales; return IsUseRmemA; } template <class ElementA, int AlignmentA, class ElementB, int AlignmentB, int RequiredAlignment> constexpr bool is_aligned() { return ((sizeof(ElementA) * AlignmentA) % RequiredAlignment == 0) && ((sizeof(ElementB) * AlignmentB) % RequiredAlignment == 0); } } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::collective
include/cutlass/gemm/collective/builders/sm90_common.inl/0
{ "file_path": "include/cutlass/gemm/collective/builders/sm90_common.inl", "repo_id": "include", "token_count": 6169 }
28
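A minimal usage sketch for the ss_smem_selector helper defined in the header above. The element type (half_t), GMMA major mode, tile extents, and the collective_builder include path are assumptions chosen for illustration, not values taken from the file.

// Hypothetical sketch, not part of sm90_common.inl: pick the shared-memory
// layout atom for a K-major tile whose K extent is 64 elements of half_t.
// "cutlass/gemm/collective/collective_builder.hpp" is assumed to transitively
// include the .inl above.
#include "cute/tensor.hpp"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/collective/collective_builder.hpp"

// BLK_MN / BLK_K are passed as static integral types, the same way the SM90
// builders forward get<0>(TileShape_MNK{}) and get<2>(TileShape_MNK{}).
using SmemLayoutAtomA = decltype(cutlass::gemm::collective::detail::ss_smem_selector<
    cute::GMMA::Major::K, cutlass::half_t, cute::Int<128>, cute::Int<64>>());

// For a 64-element K extent of a 16-bit type, the 128-byte swizzled atom
// (Layout_K_SW128_Atom spans 128 B / 2 B = 64 elements in K) is the first
// candidate whose K size divides evenly, so SmemLayoutAtomA is expected to
// alias GMMA::Layout_K_SW128_Atom<cutlass::half_t>.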
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Definitions for GEMM structures */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/mma.h" #include "cutlass/arch/wmma.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_clamp.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { //////////////////////////////////////////////////////////////////////////////// template < typename OperatorClass, typename ArchTag, typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator > struct DefaultGemmConfiguration; //////////////////////////////////////////////////////////////////////////////// template < typename ArchTag, typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassSimt, ArchTag, ElementA, ElementB, ElementC, ElementAccumulator> { static int const kAlignmentA = 1; static int const kAlignmentB = 1; using ThreadblockShape = GemmShape<128, 128, 8>; using WarpShape = GemmShape<32, 64, 8>; using InstructionShape = GemmShape<1, 1, 1>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 1, ElementAccumulator, ElementAccumulator >; using Operator = arch::OpMultiplyAdd; }; //////////////////////////////////////////////////////////////////////////////// template < typename ArchTag, typename ElementC> struct DefaultGemmConfiguration<arch::OpClassSimt, ArchTag, int8_t, int8_t, ElementC, int32_t> { static int const kAlignmentA = 4; static int const kAlignmentB = 4; using ThreadblockShape = GemmShape<128, 128, 32>; using WarpShape = GemmShape<32, 64, 32>; using InstructionShape = GemmShape<1, 1, 4>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 1, int32_t, float >; using Operator = arch::OpMultiplyAdd; }; //////////////////////////////////////////////////////////////////////////////// template < typename ArchTag, typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassWmmaTensorOp, ArchTag, ElementA, ElementB, ElementC, ElementAccumulator> { static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value; static int const kAlignmentB = 128 / sizeof_bits<ElementB>::value; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 128 / sizeof_bits<ElementC>::value, ElementAccumulator, ElementAccumulator >; using Operator = arch::OpMultiplyAdd; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm70, ElementA, ElementB, ElementC, ElementAccumulator> { static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value; static int const kAlignmentB = 128 / sizeof_bits<ElementB>::value; using ThreadblockShape = GemmShape<128, 256, 32>; using WarpShape = GemmShape<64, 64, 32>; using InstructionShape = GemmShape<8, 8, 4>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 128 / sizeof_bits<ElementC>::value, ElementAccumulator, ElementAccumulator >; using Operator = arch::OpMultiplyAdd; }; 
//////////////////////////////////////////////////////////////////////////////// template < typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, ElementA, ElementB, ElementC, ElementAccumulator> { static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value; static int const kAlignmentB = 128 / sizeof_bits<ElementA>::value; using ThreadblockShape = GemmShape<128, 256, 32>; using WarpShape = GemmShape<64, 64, 32>; using InstructionShape = GemmShape<16, 8, 8>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 128 / sizeof_bits<ElementC>::value, ElementAccumulator, ElementAccumulator >; using Operator = typename platform::conditional< (platform::is_same<ElementA, int8_t>::value || platform::is_same<ElementA, int4b_t>::value || platform::is_same<ElementA, uint8_t>::value || platform::is_same<ElementA, uint4b_t>::value), arch::OpMultiplyAddSaturate, arch::OpMultiplyAdd>::type; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, int8_t, int8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<8, 8, 16>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, int8_t, uint8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<8, 8, 16>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, uint8_t, int8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<8, 8, 16>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, uint8_t, uint8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = 
GemmShape<64, 64, 64>; using InstructionShape = GemmShape<8, 8, 16>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, int4b_t, int4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<8, 8, 32>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, int4b_t, uint4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<8, 8, 32>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, uint4b_t, int4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<8, 8, 32>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, uint4b_t, uint4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<8, 8, 32>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm75, uint1b_t, uint1b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint1b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint1b_t>::value; using ThreadblockShape = GemmShape<128, 256, 512>; using WarpShape = GemmShape<64, 64, 512>; using InstructionShape = 
GemmShape<8, 8, 128>; static int const kStages = 2; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpXorPopc; }; //////////////////////////////////////////////////////////////////////////////// template <typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration<arch::OpClassTensorOp, arch::Sm80, ElementA, ElementB, ElementC, ElementAccumulator> { static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value; static int const kAlignmentB = 128 / sizeof_bits<ElementA>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 16>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 128 / sizeof_bits<ElementC>::value, ElementAccumulator, ElementAccumulator>; using Operator = typename platform::conditional< (platform::is_same<ElementA, int8_t>::value || platform::is_same<ElementA, int4b_t>::value || platform::is_same<ElementA, uint8_t>::value || platform::is_same<ElementA, uint4b_t>::value), arch::OpMultiplyAddSaturate, arch::OpMultiplyAdd>::type; }; //////////////////////////////////////////////////////////////////////////////// template <typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration<arch::OpClassTensorOp, arch::Sm80, double, double, ElementC, ElementAccumulator> { static int const kAlignmentA = 1; static int const kAlignmentB = 1; using ThreadblockShape = GemmShape<128, 128, 16>; using WarpShape = GemmShape<32, 64, 16>; using InstructionShape = GemmShape<8, 8, 4>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 1, ElementAccumulator, ElementAccumulator>; using Operator = arch::OpMultiplyAdd; }; template <> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, complex<double>, complex<double>, complex<double>, complex<double> > { static int const kAlignmentA = 1; static int const kAlignmentB = 1; using ThreadblockShape = GemmShape<64, 64, 16>; using WarpShape = GemmShape<32, 32, 16>; using InstructionShape = GemmShape<8, 8, 4>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombination< complex<double>, 1, complex<double>, complex<double>>; using Operator = arch::OpMultiplyAddComplex; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, int8_t, int8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 32>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, int8_t, uint8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using 
WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 32>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, uint8_t, int8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 32>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, uint8_t, uint8_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint8_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint8_t>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 32>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, int4b_t, int4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<16, 8, 64>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, int4b_t, uint4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<int4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<16, 8, 64>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, uint4b_t, int4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<int4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = 
GemmShape<16, 8, 64>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, uint4b_t, uint4b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint4b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint4b_t>::value; using ThreadblockShape = GemmShape<128, 256, 128>; using WarpShape = GemmShape<64, 64, 128>; using InstructionShape = GemmShape<16, 8, 64>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAddSaturate; }; //////////////////////////////////////////////////////////////////////////////// template < typename ElementC> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm80, uint1b_t, uint1b_t, ElementC, int32_t> { static int const kAlignmentA = 128 / sizeof_bits<uint1b_t>::value; static int const kAlignmentB = 128 / sizeof_bits<uint1b_t>::value; using ThreadblockShape = GemmShape<128, 256, 512>; using WarpShape = GemmShape<64, 64, 512>; using InstructionShape = GemmShape<16, 8, 256>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombinationClamp< ElementC, 128 / sizeof_bits<ElementC>::value, int32_t, float>; using Operator = arch::OpMultiplyAdd; }; //////////////////////////////////////////////////////////////////////////////// /// Base configuration for all {fe4m3, fe5m2} x {fe4m3, fe5m2} combinations on SM89 template < typename ElementA, typename ElementB, typename ElementC, typename ElementAccumulator> struct DefaultGemmConfigurationSm89F8 { static_assert((platform::is_same<ElementA, cutlass::float_e4m3_t>::value || platform::is_same<ElementA, cutlass::float_e5m2_t>::value), "ElementA must be of type float_e4m3_t or float_e5m2_t"); static_assert((platform::is_same<ElementB, cutlass::float_e4m3_t>::value || platform::is_same<ElementB, cutlass::float_e5m2_t>::value), "ElementB must be of type float_e4m3_t or float_e5m2_t"); static int const kAlignmentA = 128 / sizeof_bits<ElementA>::value; static int const kAlignmentB = 128 / sizeof_bits<ElementB>::value; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 32>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 128 / sizeof_bits<ElementC>::value, ElementAccumulator, ElementAccumulator>; using Operator = arch::OpMultiplyAdd; }; /// Partial specialization for SM89 fe4m3 x fe4m3 template <typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm89, cutlass::float_e4m3_t, cutlass::float_e4m3_t, ElementC, ElementAccumulator> : DefaultGemmConfigurationSm89F8< cutlass::float_e4m3_t, cutlass::float_e4m3_t, ElementC, ElementAccumulator> {}; /// Partial specialization for SM89 fe4m3 x fe5m2 template <typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm89, cutlass::float_e4m3_t, cutlass::float_e5m2_t, ElementC, ElementAccumulator> : DefaultGemmConfigurationSm89F8< cutlass::float_e4m3_t, cutlass::float_e5m2_t, ElementC, ElementAccumulator> {}; /// 
Partial specialization for SM89 fe5m2 x fe4m3 template <typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm89, cutlass::float_e5m2_t, cutlass::float_e4m3_t, ElementC, ElementAccumulator> : DefaultGemmConfigurationSm89F8< cutlass::float_e5m2_t, cutlass::float_e4m3_t, ElementC, ElementAccumulator> {}; /// Partial specialization for SM89 fe5m2 x fe5m2 template <typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm89, cutlass::float_e5m2_t, cutlass::float_e5m2_t, ElementC, ElementAccumulator> : DefaultGemmConfigurationSm89F8< cutlass::float_e5m2_t, cutlass::float_e5m2_t, ElementC, ElementAccumulator> {}; //////////////////////////////////////////////////////////////////////////////// template <typename ElementC, typename ElementAccumulator> struct DefaultGemmConfiguration<arch::OpClassTensorOp, arch::Sm90, double, double, ElementC, ElementAccumulator> { static int const kAlignmentA = 1; static int const kAlignmentB = 1; using ThreadblockShape = GemmShape<128, 256, 64>; using WarpShape = GemmShape<64, 64, 64>; using InstructionShape = GemmShape<16, 8, 4>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombination< ElementC, 1, ElementAccumulator, ElementAccumulator>; using Operator = arch::OpMultiplyAdd; }; template <> struct DefaultGemmConfiguration< arch::OpClassTensorOp, arch::Sm90, complex<double>, complex<double>, complex<double>, complex<double> > { static int const kAlignmentA = 1; static int const kAlignmentB = 1; using ThreadblockShape = GemmShape<64, 64, 16>; using WarpShape = GemmShape<32, 32, 16>; using InstructionShape = GemmShape<16, 8, 4>; static int const kStages = 3; using EpilogueOutputOp = epilogue::thread::LinearCombination< complex<double>, 1, complex<double>, complex<double>>; using Operator = arch::OpMultiplyAddComplex; }; } // namespace device } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
include/cutlass/gemm/device/default_gemm_configuration.h/0
{ "file_path": "include/cutlass/gemm/device/default_gemm_configuration.h", "repo_id": "include", "token_count": 9752 }
29
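A short sketch of how the DefaultGemmConfiguration traits above are normally consumed by the device-level GEMM templates. The half_t/SM80 combination is an assumption picked for illustration; the commented values follow directly from the SM80 tensor-op specialization in the header.

#include "cutlass/arch/arch.h"
#include "cutlass/arch/mma.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"

// Default tile sizes, alignments, and stage count for an SM80 tensor-op GEMM
// with half_t operands and float accumulation.
using Config = cutlass::gemm::device::DefaultGemmConfiguration<
    cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
    cutlass::half_t, cutlass::half_t, cutlass::half_t, float>;

using ThreadblockShape = Config::ThreadblockShape;  // GemmShape<128, 256, 64>
using WarpShape        = Config::WarpShape;         // GemmShape<64, 64, 64>

static_assert(Config::kAlignmentA == 8, "128-bit access of half_t is 8 elements");
static_assert(Config::kStages == 3, "SM80 defaults use a 3-stage mainloop");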
/*************************************************************************************************** * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a GEMM kernel that computes the absolute maximum of the output tensor and applies additional scaling factors to operands. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/gemm/kernel/gemm_universal.h" #include "cutlass/gemm/kernel/default_gemm_universal.h" #include "cutlass/gemm/kernel/default_gemm_with_absmax.h" #include "cutlass/gemm/device/default_gemm_configuration.h" #include "cutlass/gemm/device/gemm_universal_base.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// // Universal GEMM with absolute-maximum calculation and scaling template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Element type for internal accumulation typename ElementAccumulator_ = ElementC_, /// Operator class tag typename OperatorClass_ = arch::OpClassTensorOp, /// Tag indicating architecture to tune for. This is the minimum SM that /// supports the intended feature. The device kernel can be built /// targeting any SM larger than this number. 
typename ArchTag_ = arch::Sm89, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::WarpShape, /// Instruction-level tile size (concept: GemmShape) typename InstructionShape_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::InstructionShape, /// Epilogue output operator typename EpilogueOutputOp_ = cutlass::epilogue::thread::LinearCombinationBiasElementwise< ElementC_, ElementAccumulator_, ElementAccumulator_, ElementC_, ElementC_, 128 / cutlass::sizeof_bits<ElementC_>::value>, /// Threadblock-level swizzling operator typename ThreadblockSwizzle_ = threadblock::GemmIdentityThreadblockSwizzle<>, /// Number of stages used in the pipelined mainloop int Stages = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kStages, /// Access granularity of A matrix in units of elements int AlignmentA = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentA, /// Access granularity of B matrix in units of elements int AlignmentB = DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::kAlignmentB, /// Operation performed by GEMM typename Operator_ = typename DefaultGemmConfiguration< OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_, ElementAccumulator_>::Operator, /// Complex elementwise transformation on A operand ComplexTransform TransformA = ComplexTransform::kNone, /// Complex elementwise transformation on B operand ComplexTransform TransformB = ComplexTransform::kNone > class GemmUniversalWithAbsMax; // Partial specialization for SM89 template < typename ElementA_, typename LayoutA_, typename ElementB_, typename LayoutB_, typename ElementC_, typename LayoutC_, typename ElementAccumulator_, typename ThreadblockShape_, typename WarpShape_, typename InstructionShape_, typename EpilogueOutputOp_, typename ThreadblockSwizzle_, int Stages, int AlignmentA, int AlignmentB, typename Operator_, ComplexTransform TransformA, ComplexTransform TransformB > class GemmUniversalWithAbsMax< ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, ElementAccumulator_, arch::OpClassTensorOp, arch::Sm89, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB, Operator_, TransformA, TransformB > : public GemmUniversalBase< typename kernel::DefaultGemmWithAbsMax< ElementA_, LayoutA_, TransformA, AlignmentA, ElementB_, LayoutB_, TransformB, AlignmentB, ElementC_, LayoutC_, ElementAccumulator_, arch::OpClassTensorOp, arch::Sm89, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, Operator_ >::GemmKernel > { public: using ElementAccumulator = ElementAccumulator_; using OperatorClass = arch::OpClassTensorOp; using ArchTag = arch::Sm89; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; static int const kStages = Stages; static int const 
kAlignmentA = AlignmentA; static int const kAlignmentB = AlignmentB; static int const kAlignmentC = EpilogueOutputOp::kCount; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using Base = GemmUniversalBase< typename kernel::DefaultGemmWithAbsMax< ElementA_, LayoutA_, TransformA, AlignmentA, ElementB_, LayoutB_, TransformB, AlignmentB, ElementC_, LayoutC_, ElementAccumulator_, OperatorClass, ArchTag, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, Operator_ >::GemmKernel >; using Arguments = typename Base::Arguments; using GemmKernel = typename Base::GemmKernel; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for SM89 column-major output exchanges problem size and operand. template < typename ElementA_, typename LayoutA_, typename ElementB_, typename LayoutB_, typename ElementC_, typename ElementAccumulator_, typename ThreadblockShape_, typename WarpShape_, typename InstructionShape_, typename EpilogueOutputOp_, typename ThreadblockSwizzle_, int Stages, int AlignmentA, int AlignmentB, typename Operator_, ComplexTransform TransformA, ComplexTransform TransformB> class GemmUniversalWithAbsMax<ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, layout::ColumnMajor, // partially specialized on LayoutC ElementAccumulator_, arch::OpClassTensorOp, arch::Sm89, ThreadblockShape_, WarpShape_, InstructionShape_, EpilogueOutputOp_, ThreadblockSwizzle_, Stages, AlignmentA, AlignmentB, Operator_, TransformA, TransformB> { public: using ElementA = ElementA_; using LayoutA = LayoutA_; using TensorRefA = TensorRef<ElementA const, LayoutA>; using ElementB = ElementB_; using LayoutB = LayoutB_; using TensorRefB = TensorRef<ElementB const, LayoutB>; using ElementC = ElementC_; using LayoutC = layout::ColumnMajor; using TensorRefC = TensorRef<ElementC const, LayoutC>; using TensorRefD = TensorRef<ElementC, LayoutC>; using ElementAccumulator = ElementAccumulator_; using OperatorClass = arch::OpClassTensorOp; using ArchTag = arch::Sm89; using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; using InstructionShape = InstructionShape_; using EpilogueOutputOp = EpilogueOutputOp_; using ThreadblockSwizzle = ThreadblockSwizzle_; using Operator = Operator_; static int const kStages = Stages; static int const kAlignmentA = AlignmentA; static int const kAlignmentB = AlignmentB; static ComplexTransform const kTransformA = TransformA; static ComplexTransform const kTransformB = TransformB; using UnderlyingOperator = typename GemmUniversalWithAbsMax< ElementB, typename layout::LayoutTranspose<LayoutB>::type, ElementA, typename layout::LayoutTranspose<LayoutA>::type, ElementC, layout::RowMajor, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, kAlignmentB, kAlignmentA, Operator, kTransformB, kTransformA >::Base; using GemmKernel = typename UnderlyingOperator::GemmKernel; static int const kAlignmentC = EpilogueOutputOp::kCount; /// Argument structure using Arguments = typename UnderlyingOperator::Arguments; private: UnderlyingOperator underlying_operator_; public: /// Constructs the GEMM. 
GemmUniversalWithAbsMax() { } /// Helper to construct a transposed equivalent for the underying GEMM operator static Arguments to_underlying_arguments(Arguments const &args) { return args.transposed_problem(); } /// Determines whether the GEMM can execute the given problem. static Status can_implement(Arguments const &args) { return UnderlyingOperator::can_implement(to_underlying_arguments(args)); } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { return UnderlyingOperator::get_workspace_size(to_underlying_arguments(args)); } /// Computes the grid shape static dim3 get_grid_shape(Arguments const &args) { return UnderlyingOperator::get_grid_shape(to_underlying_arguments(args)); } /// Computes the maximum number of active blocks per multiprocessor static int maximum_active_blocks(int smem_capacity = -1) { return UnderlyingOperator::maximum_active_blocks(smem_capacity); } /// Initializes GEMM state from arguments. Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { return underlying_operator_.initialize(to_underlying_arguments(args), workspace, stream); } /// Lightweight update given a subset of arguments Status update(Arguments const &args, void *workspace = nullptr) { return underlying_operator_.update(to_underlying_arguments(args), workspace); } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { return underlying_operator_.run(stream); } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
include/cutlass/gemm/device/gemm_universal_with_absmax.h/0
{ "file_path": "include/cutlass/gemm/device/gemm_universal_with_absmax.h", "repo_id": "include", "token_count": 4561 }
30
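A hedged sketch of the host-side call sequence that the GemmUniversalWithAbsMax specializations above expose (can_implement, get_workspace_size, initialize, run). GemmOp is a placeholder for some fully specialized GemmUniversalWithAbsMax<...>, and the device_memory helper is assumed to be available from the CUTLASS tools/util headers.

#include "cutlass/cutlass.h"
#include "cutlass/util/device_memory.h"  // assumed available (tools/util); provides device_memory::allocation

// GemmOp: some fully specialized cutlass::gemm::device::GemmUniversalWithAbsMax<...>.
template <typename GemmOp>
cutlass::Status run_gemm_with_absmax(typename GemmOp::Arguments const &args,
                                     cudaStream_t stream = nullptr) {
  // Reject unsupported problem sizes / alignments up front.
  cutlass::Status status = GemmOp::can_implement(args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // Allocate whatever scratch space the kernel reports it needs.
  cutlass::device_memory::allocation<uint8_t> workspace(GemmOp::get_workspace_size(args));

  GemmOp gemm_op;
  status = gemm_op.initialize(args, workspace.get(), stream);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }
  return gemm_op.run(stream);  // equivalently: gemm_op(stream)
}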
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level Rank2K definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/blas3.h" #include "cutlass/layout/matrix.h" #include "cutlass/arch/wmma.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/rank_2k_universal.h" #include "cutlass/gemm/threadblock/default_mma_core_sm75.h" #include "cutlass/gemm/threadblock/default_mma_core_sm70.h" #include "cutlass/gemm/threadblock/default_mma_core_sm80.h" #include "cutlass/gemm/threadblock/default_mma.h" #include "cutlass/gemm/threadblock/default_mma_core_simt.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op_blas3.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_simt.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #if defined(CUTLASS_ARCH_WMMA_ENABLED) #include "cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h" #endif //CUTLASS_ARCH_WMMA_ENABLED //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { //////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC_, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator, /// Blas3 computation mode BlasMode BlasMode_ = BlasMode::kSymmetric> struct DefaultRank2K; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Hopper Architecture template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of A matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, 
/// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator> struct DefaultRank2K< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator> { /// Define the threadblock-scoped matrix multiply-accumulate (A x BT) using Mma1 = typename cutlass::gemm::threadblock::DefaultMma< ElementA, LayoutA, kAlignmentA, ElementB, typename layout::LayoutTranspose<LayoutB>::type, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; /// Define the threadblock-scoped matrix multiply-accumulate (B x AT) using Mma2 = typename cutlass::gemm::threadblock::DefaultMma< ElementB, LayoutB, kAlignmentB, ElementA, typename layout::LayoutTranspose<LayoutA>::type, kAlignmentA, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm90, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3< ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue; /// Define the kernel-level Rank2K operator. 
using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, BlasMode::kSymmetric>; }; //////////////////////////////////////////////////////////////////////////////// /// Partial specialization for Ampere Architecture template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of A matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC, /// Element type for internal accumulation typename ElementAccumulator, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by GEMM typename Operator> struct DefaultRank2K< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,layout::RowMajor, FillModeC, ElementAccumulator, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator> { /// Define the threadblock-scoped matrix multiply-accumulate (A x BT) using Mma1 = typename cutlass::gemm::threadblock::DefaultMma< ElementA, LayoutA, kAlignmentA, ElementB, typename layout::LayoutTranspose<LayoutB>::type, kAlignmentB, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; /// Define the threadblock-scoped matrix multiply-accumulate (B x AT) using Mma2 = typename cutlass::gemm::threadblock::DefaultMma< ElementB, LayoutB, kAlignmentB, ElementA, typename layout::LayoutTranspose<LayoutA>::type, kAlignmentA, ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, Stages, Operator>::ThreadblockMma; static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK; /// Define the epilogue using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpBlas3< ThreadblockShape, typename Mma1::Operator, kPartitionsK, EpilogueOutputOp, EpilogueOutputOp::kCount, BlasMode::kSymmetric>::Epilogue; /// Define the kernel-level Rank2K operator. using Rank2Kkernel = kernel::Rank2KUniversal<Mma1, Mma2, Epilogue, ThreadblockSwizzle, FillModeC, BlasMode::kSymmetric>; }; //////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
include/cutlass/gemm/kernel/default_rank_2k.h/0
{ "file_path": "include/cutlass/gemm/kernel/default_rank_2k.h", "repo_id": "include", "token_count": 3908 }
31
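A hypothetical sketch instantiating the SM80 row-major-C specialization of DefaultRank2K shown above. Every concrete choice (double operands, tile sizes, epilogue, swizzle, stage count) is an assumption made for illustration rather than a configuration taken from the file.

#include "cutlass/blas3.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
#include "cutlass/gemm/kernel/default_rank_2k.h"

// Plain linear-combination epilogue operating on scalars of double.
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<double, 1, double, double>;

// Compose Mma1 (A x B^T), Mma2 (B x A^T), and the BLAS3 epilogue into a
// kernel-level SYR2K operator updating the lower-triangular part of C.
using DefaultOp = cutlass::gemm::kernel::DefaultRank2K<
    double, cutlass::layout::ColumnMajor, 1,        // A, layout, alignment
    double, cutlass::layout::ColumnMajor, 1,        // B, layout, alignment
    double, cutlass::layout::RowMajor,              // C (row-major specialization above)
    cutlass::FillMode::kLower,
    double,                                         // accumulator
    cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<64, 64, 16>,           // threadblock tile
    cutlass::gemm::GemmShape<32, 32, 16>,           // warp tile
    cutlass::gemm::GemmShape<8, 8, 4>,              // tensor-op instruction
    EpilogueOp,
    cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
    3,                                              // stages
    false,                                          // SplitKSerial
    cutlass::arch::OpMultiplyAdd>;

// The resulting kernel-level operator exposed by the default:
using Rank2Kkernel = DefaultOp::Rank2Kkernel;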
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Gemm kernel with an epilogue defined under the epilogue visitor concept with streamk. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/barrier.h" #include "cutlass/block_striped.h" #include "cutlass/trace.h" #include "cutlass/gemm/kernel/gemm_universal_streamk.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_ ///! 
Threadblock mapping function > class GemmWithEpilogueVisitorStreamk { public: using Base = GemmUniversalStreamk<Mma_, Epilogue_, ThreadblockSwizzle_>; // // Types and constants // using Mma = Mma_; using Epilogue = Epilogue_; using FusionCallbacks = typename Epilogue::FusionCallbacks; using EpilogueOutputOp = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; /// The per-thread tile of raw accumulators using AccumulatorTile = typename Mma::FragmentC; static ComplexTransform const kTransformA = Mma::kTransformA; static ComplexTransform const kTransformB = Mma::kTransformB; using Operator = typename Mma::Operator; using OperatorClass = typename Mma::Operator::OperatorClass; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename Mma::Operator::Shape; using InstructionShape = typename Mma::Policy::Operator::InstructionShape; using ArchTag = typename Mma::ArchTag; static int const kStages = Mma::kStages; static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Workspace bytes per thread block static size_t const kWorkspaceBytesPerBlock = __NV_STD_MAX( kThreadCount * sizeof(AccumulatorTile), Epilogue::kWorkspaceBytesPerBlock); /// Block-striped reduction utility using BlockStripedReduceT = BlockStripedReduce<kThreadCount, AccumulatorTile>; // // Structures // using Arguments = typename Base::Arguments; /// Parameters structure struct Params { public: // // Data members // cute::Shape<int32_t,int32_t,int32_t> problem_shape{}; void * ptr_A{nullptr}; void * ptr_B{nullptr}; typename Mma::IteratorA::Params params_A{}; typename Mma::IteratorB::Params params_B{}; int64_t batch_stride_A{0}; int64_t batch_stride_B{0}; GemmUniversalMode mode{GemmUniversalMode::kGemm}; ThreadblockSwizzle block_mapping{}; void *barrier_workspace{nullptr}; void *partials_workspace{nullptr}; typename FusionCallbacks::Params output_op{}; void * ptr_D{nullptr}; void * ptr_C{nullptr}; typename Epilogue::OutputTileIterator::Params params_D{}; typename Epilogue::OutputTileIterator::Params params_C{}; int64_t batch_stride_D{0}; int64_t batch_stride_C{0}; protected: // // Host-only dispatch-utilities // /// Pad the given allocation size up to the nearest cache line static size_t cacheline_align_up(size_t size) { static const int CACHELINE_SIZE = 128; return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE; } /// Get the workspace size needed for barrier size_t get_barrier_workspace_size() const { // For atomic reduction, each SK-block needs a synchronization flag. For parallel reduction, // each reduction block needs its own synchronization flag. 
int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); int num_flags = fast_max(sk_blocks, block_mapping.reduction_blocks); return cacheline_align_up(sizeof(typename Barrier::T) * num_flags); } /// Get the workspace size needed for intermediate partial sums size_t get_partials_workspace_size() const { int sk_blocks = block_mapping.sk_regions() * block_mapping.sk_blocks_per_region(); return cacheline_align_up(kWorkspaceBytesPerBlock * sk_blocks); } public: // // Host dispatch API // /// Default constructor Params() = default; /// Constructor Params( Arguments const &args, /// GEMM application arguments int device_sms, /// Number of SMs on the device int sm_occupancy) /// Kernel SM occupancy (in thread blocks) : problem_shape({args.problem_size.m(), args.problem_size.n(), args.batch_count}), params_A(args.lda ? make_Coord_with_padding<LayoutA::kStrideRank>(args.lda) : args.stride_a), params_B(args.ldb ? make_Coord_with_padding<LayoutB::kStrideRank>(args.ldb) : args.stride_b), params_C(args.ldc ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldc) : args.stride_c), params_D(args.ldd ? make_Coord_with_padding<LayoutC::kStrideRank>(args.ldd) : args.stride_d), output_op(FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/)), mode(args.mode), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_B)), ptr_C(const_cast<void *>(args.ptr_C)), ptr_D(args.ptr_D), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_B), batch_stride_C(args.batch_stride_C), batch_stride_D(args.batch_stride_D), barrier_workspace(nullptr), partials_workspace(nullptr) { // Number of SMs to make available for StreamK decomposition int avail_sms = (args.avail_sms == -1) ? device_sms : fast_min(args.avail_sms, device_sms); // Initialize the block mapping structure block_mapping = ThreadblockSwizzle( args.mode, args.problem_size, {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.batch_count, sm_occupancy, device_sms, avail_sms, sizeof(ElementA), sizeof(ElementB), sizeof(ElementC), Epilogue::kAccumulatorFragments); } /// Returns the workspace size (in bytes) needed for these parameters size_t get_workspace_size() const { return get_barrier_workspace_size() + get_partials_workspace_size(); } /// Assign and initialize the specified workspace buffer. Assumes /// the memory allocated to workspace is at least as large as get_workspace_size(). 
Status init_workspace( void *workspace, cudaStream_t stream = nullptr) { uint8_t *ptr = static_cast<uint8_t*>(workspace); // Establish partials workspace partials_workspace = nullptr; size_t partials_workspace_bytes = get_partials_workspace_size(); if (partials_workspace_bytes > 0) { if (!workspace) { return Status::kErrorWorkspaceNull; } partials_workspace = ptr; ptr += partials_workspace_bytes; } // Establish barrier workspace barrier_workspace = nullptr; size_t barrier_workspace_bytes = get_barrier_workspace_size(); if (barrier_workspace_bytes > 0) { if (!workspace) { return Status::kErrorWorkspaceNull; } barrier_workspace = ptr; ptr += barrier_workspace_bytes; } // Zero-initialize barrier workspace if (barrier_workspace) { size_t barrier_workspace_bytes = get_barrier_workspace_size(); CUTLASS_TRACE_HOST(" Initialize " << barrier_workspace_bytes << " barrier bytes"); cudaError_t result = cudaMemsetAsync( barrier_workspace, 0, barrier_workspace_bytes, stream); if (result != cudaSuccess) { CUTLASS_TRACE_HOST(" cudaMemsetAsync() returned error " << cudaGetErrorString(result)); return Status::kErrorInternal; } } return Status::kSuccess; } /// Returns the GEMM volume in thread block tiles cutlass::gemm::GemmCoord get_tiled_shape() const { return block_mapping.tiled_shape(); } /// Returns the total number of thread blocks to launch int get_grid_blocks() const { dim3 grid_dims = get_grid_dims(); return grid_dims.x * grid_dims.y * grid_dims.z; } /// Returns the grid extents in thread blocks to launch dim3 get_grid_dims() const { return block_mapping.get_grid_dims(); } /// Lightweight update given a subset of arguments. void update(Arguments const &args) { CUTLASS_TRACE_HOST("GemmUniversalStreamK::Params::update()"); // Update input/output pointers ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_B); ptr_C = const_cast<void *>(args.ptr_C); ptr_D = args.ptr_D; batch_stride_A = args.batch_stride_A; batch_stride_B = args.batch_stride_B; batch_stride_C = args.batch_stride_C; batch_stride_D = args.batch_stride_D; output_op = FusionCallbacks::to_underlying_arguments(args.problem_size, args.epilogue, nullptr /*workspace*/); problem_shape = make_shape(args.problem_size.m(), args.problem_size.n(), args.batch_count); } }; struct TileWorkDesc: Base::TileWorkDesc { int k_end; CUTLASS_DEVICE bool tile_finished(Params const &params) { return (k_end == params.block_mapping.problem_size.k()); } }; // using TileWorkDesc = typename Base::TileWorkDesc; using SharedStorage = typename Base::SharedStorage; protected: // // Data members // /// GEMM problem parameters Params params; /// Shared storage reference SharedStorage &shared_storage; /// ID within the threadblock int thread_idx; /// ID of warp int warp_idx; /// ID of each thread within a warp int lane_idx; /// Threadblock scoped epilogue Epilogue epilogue; public: // // Host-only dispatch API // /// Determines whether the GEMM problem size satisfies this kernel's /// alignment requirements static Status can_implement( cutlass::gemm::GemmCoord const & problem_size) { return Base::can_implement(problem_size); } /// Determines whether the GEMM problem satisfies this kernel's /// alignment requirements static Status can_implement(Arguments const &args) { return can_implement(args.problem_size); } protected: // // Device-only utility methods // /// Iterator for fetching tile fragments from A CUTLASS_DEVICE typename Mma::IteratorA init_iterator_A( TileWorkDesc &tile_work, GemmUniversalMode mode) { // The input A matrix ElementA *ptr_A = 
static_cast<ElementA *>(params.ptr_A); // Update input pointers based on batched/array mode if (mode == GemmUniversalMode::kBatched) { ptr_A += tile_work.tiled_coord.k() * params.batch_stride_A; } if (mode == GemmUniversalMode::kArray) { ptr_A = static_cast<ElementA * const *>(params.ptr_A)[tile_work.tiled_coord.k()]; } int m_begin = tile_work.tiled_coord.m() * Mma::Shape::kM; int m_end = params.block_mapping.problem_size.m(); return Mma::IteratorA( params.params_A, ptr_A, { m_end, tile_work.k_end }, threadIdx.x, { m_begin, tile_work.k_begin }); } /// Iterator for fetching tile fragments from B CUTLASS_DEVICE typename Mma::IteratorB init_iterator_B( TileWorkDesc &tile_work, GemmUniversalMode mode) { // The input B matrix ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // Update input pointers based on batched/array mode if (mode == GemmUniversalMode::kBatched) { ptr_B += tile_work.tiled_coord.k() * params.batch_stride_B; } if (mode == GemmUniversalMode::kArray) { ptr_B = static_cast<ElementB * const *>(params.ptr_B)[tile_work.tiled_coord.k()]; } int n_begin = tile_work.tiled_coord.n() * Mma::Shape::kN; int n_end = params.block_mapping.problem_size.n(); return Mma::IteratorB( params.params_B, ptr_B, { tile_work.k_end, n_end }, threadIdx.x, { tile_work.k_begin, n_begin }); } CUTLASS_DEVICE void init_dp_tile_work( TileWorkDesc &tile_work, int tile_idx) { // The linear tile index tile_work.tile_idx = tile_idx; // The first global-scoped MAC-iteration this threadblock will perform for this tile tile_work.iter_begin = tile_idx * params.block_mapping.iters_per_tile(); // The number of MAC-iterations this threadblock will perform for this tile tile_work.k_iters_remaining = params.block_mapping.iters_per_tile(); // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_begin = 0; // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_end = params.block_mapping.problem_size.k(); // The location of this tile (in threadblock-tile coordinates) in the output matrix tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); } CUTLASS_DEVICE void init_sk_tile_work( TileWorkDesc &tile_work, int tile_idx, int block_iter_begin, int block_iter_end) { // The linear tile index tile_work.tile_idx = tile_idx; // The first global-scoped MAC-iteration for this tile int tile_iter_begin = tile_idx * params.block_mapping.iters_per_tile(); // The first global-scoped MAC-iteration this threadblock will perform for this tile tile_work.iter_begin = max(block_iter_begin, tile_iter_begin); // The first tile-scoped MAC-iteration this threadblock will perform for this tile int k_iter_begin = tile_work.iter_begin - tile_iter_begin; // The last (one past) tile-scoped MAC-iteration this threadblock will perform for this tile int k_iter_end = block_iter_end - tile_iter_begin; // The number of MAC-iterations this threadblock will perform for this tile tile_work.k_iters_remaining = k_iter_end - k_iter_begin; // The starting index in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_begin = k_iter_begin * Mma::Shape::kK; // The ending index (one-past) in the k-domain for MAC-iterations this threadblock will perform for this tile tile_work.k_end = min( params.block_mapping.problem_size.k(), // extent of k domain (k_iter_end * Mma::Shape::kK)); // extent of the threadblock's global iteration assignment // The location of this tile (in 
threadblock-tile coordinates) in the output matrix tile_work.tiled_coord = params.block_mapping.get_tile_offset(tile_work.tile_idx); } /// Share accumulators with peers CUTLASS_DEVICE void share_accumulators( AccumulatorTile const &accumulator_tile, int block_idx, int first_block_idx) { AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace); int accum_tile_offset = first_block_idx * kThreadCount; if (block_idx == first_block_idx) { // First peer initializes the workspace partials BlockStripedReduceT::store(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); } else { // Subsequent peers atomically accumulate into the workspace partials if (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) { // Non-deterministic reduction order: wait for the first peer to have initialized the partials before we add to them Barrier::wait_lt(params.barrier_workspace, thread_idx, first_block_idx, 1); } else { // Turnstile reduction order: wait until the previous peer has written int wait_count = block_idx - first_block_idx; Barrier::wait_eq(params.barrier_workspace, thread_idx, first_block_idx, wait_count); } // Perform reduction in workspace BlockStripedReduceT::reduce(accum_tile_workspace + accum_tile_offset, accumulator_tile, thread_idx); } // Signal our arrival Barrier::arrive_inc(params.barrier_workspace, thread_idx, first_block_idx); } /// Acquire accumulators from peers CUTLASS_DEVICE void acquire_accumulators( AccumulatorTile &accumulator_tile, int block_idx, int first_block_idx) { AccumulatorTile *accum_tile_workspace = reinterpret_cast<AccumulatorTile *>(params.partials_workspace); // Wait for arrival int num_carry_in = block_idx - first_block_idx; Barrier::wait_eq_reset(params.barrier_workspace, thread_idx, first_block_idx, num_carry_in); // Load and add peer-partials accumulator tile to local accumulator tile int accum_tile_offset = first_block_idx * kThreadCount; BlockStripedReduceT::load_add(accumulator_tile, accum_tile_workspace + accum_tile_offset, thread_idx); } /// Perform epilogue computations and output CUTLASS_DEVICE void do_epilogue( TileWorkDesc &tile_work, AccumulatorTile &accumulator_tile) { cutlass::gemm::GemmCoord threadblock_tile_offset{ tile_work.tiled_coord.m(), tile_work.tiled_coord.n(), tile_work.tiled_coord.k() }; // Execute the epilogue operator to update the destination tensor. 
epilogue( accumulator_tile, threadblock_tile_offset, params.problem_shape, thread_idx); } CUTLASS_DEVICE void separate_reduction(int reduce_idx) { int peer_idx_begin, peer_idx_last, reduce_tile_idx, reduce_fragment_idx; // Reduce by sk-tile (every tile contributed to by one or more blocks) reduce_tile_idx = reduce_idx / Epilogue::kAccumulatorFragments; reduce_fragment_idx = reduce_idx % Epilogue::kAccumulatorFragments; int iter_tile_first = reduce_tile_idx * params.block_mapping.iters_per_tile(); int iter_tile_last = iter_tile_first + params.block_mapping.iters_per_tile() - 1; peer_idx_begin = params.block_mapping.get_sk_block_idx(iter_tile_first); peer_idx_last = params.block_mapping.get_sk_block_idx(iter_tile_last); // Wait for peers to complete int peer_idx_end = peer_idx_last + 1; int num_peers = peer_idx_end - peer_idx_begin; Barrier::wait_eq_reset( params.barrier_workspace, thread_idx, (reduce_tile_idx * Epilogue::kAccumulatorFragments) + reduce_fragment_idx, num_peers); /// The location of this tile (in threadblock-tile coordinates) in the output matrix GemmCoord tiled_coord = params.block_mapping.get_tile_offset(reduce_tile_idx); // Execute the epilogue operator to update the destination tensor. epilogue.reduce( peer_idx_begin, peer_idx_end, reduce_fragment_idx, params.partials_workspace, tiled_coord, params.problem_shape, thread_idx); } CUTLASS_DEVICE void process_tile( TileWorkDesc tile_work, int block_idx, int dp_start_block_idx, int block_iter_begin) { // Initialize input iterators typename Mma::IteratorA iterator_A = init_iterator_A(tile_work, params.mode); typename Mma::IteratorB iterator_B = init_iterator_B(tile_work, params.mode); // Initialize accumulators AccumulatorTile accumulator_tile; accumulator_tile.clear(); // Initialize MMA abstraction Mma mma( shared_storage.main_loop, thread_idx, warp_idx, lane_idx); // Perform this tile's range of multiply-accumulate (MAC) iterations mma(tile_work.k_iters_remaining, accumulator_tile, iterator_A, iterator_B, accumulator_tile); if ((ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kAtomic) || (params.block_mapping.reduction_blocks == 0) || (block_idx >= dp_start_block_idx)) { // // Cooperative SK peer reduction or DP block // int first_block_idx = params.block_mapping.get_first_block_idx(tile_work.tile_idx, block_idx); if (!tile_work.tile_finished(params)) { // Non "finishing" SK blocks must share their partial accumulator sums through global scratch workspace share_accumulators(accumulator_tile, block_idx, first_block_idx); } else { // DP blocks and "finishing" SK blocks must perform epilogue operations and write the output tile if (!tile_work.tile_started()) { // A "finishing" SK block must first aggregate its accumulator partial sums with those shared by peer threadblocks acquire_accumulators(accumulator_tile, block_idx, first_block_idx); } do_epilogue(tile_work, accumulator_tile); } } else { // // Separate peer reduction // // Share accumulator partial sums with peer threadblock(s) through scratch workspace epilogue.share(block_idx, params.partials_workspace, accumulator_tile, tile_work.tile_started()); // Signal arrival Barrier::arrive_range_inc( params.barrier_workspace, thread_idx, tile_work.tile_idx * Epilogue::kAccumulatorFragments, Epilogue::kAccumulatorFragments); } } /// Executes one GEMM CUTLASS_DEVICE void gemm() { // Initialize block's iteration range int tile_idx = 0; int block_iter_begin = 0; int block_iters_remaining = 0; int block_idx = params.block_mapping.get_block_idx(); int 
sk_padding_start_block_idx = params.block_mapping.sk_regions() * params.block_mapping.sk_blocks_per_region(); int dp_start_block_idx = params.block_mapping.sk_waves * params.block_mapping.avail_sms; int reduce_start_block_idx = dp_start_block_idx + params.block_mapping.dp_blocks; int grid_padding_start_block_idx = reduce_start_block_idx + params.block_mapping.reduction_blocks; // Initialize tile work descriptor TileWorkDesc tile_work; bool dp_block = (block_idx >= dp_start_block_idx) && (block_idx < reduce_start_block_idx); bool sk_block = (block_idx < sk_padding_start_block_idx); bool reduce_block = (block_idx >= reduce_start_block_idx) && (block_idx < grid_padding_start_block_idx) && (ThreadblockSwizzle::kReductionStrategy == ThreadblockSwizzle::kMixed); if (dp_block) { // This is a DP block int dp_block_idx = block_idx - dp_start_block_idx; int first_dp_tile = (params.block_mapping.cohort_raster) ? 0 : params.block_mapping.sk_tiles; // Blocks in first DP wave get configured number of tiles tile_idx = first_dp_tile + dp_block_idx; int tile_allottment = params.block_mapping.dp_first_wave_tiles; // Blocks in subsequent DP waves get 1 tile if (dp_block_idx >= params.block_mapping.avail_sms) { tile_allottment = 1; tile_idx += (params.block_mapping.dp_first_wave_tiles - 1) * params.block_mapping.avail_sms; } block_iters_remaining = params.block_mapping.iters_per_tile() * tile_allottment; init_dp_tile_work(tile_work, tile_idx); // DP blocks exit if out of bounds or overlap an SK tile (only possible during cohort rasterization, where dp_first_wave_tiles must be 1) if ((tile_idx < params.block_mapping.sk_tiles) || (tile_work.tiled_coord.m() >= params.block_mapping.tiled_shape().m()) || (tile_work.tiled_coord.n() >= params.block_mapping.tiled_shape().n())) { return; } } else if (sk_block) { // This is a SK block int block_iter_end; params.block_mapping.get_iter_extents(block_idx, block_iter_begin, block_iter_end); block_iters_remaining = block_iter_end - block_iter_begin; tile_idx = params.block_mapping.get_sk_tile_idx(block_iter_end - 1); init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); } else { if (reduce_block) { // This is a reduction threadblock int reduce_block_idx = block_idx - reduce_start_block_idx; separate_reduction(reduce_block_idx); } return; } // Iteration-processing loop body CUTLASS_PRAGMA_NO_UNROLL while (true) { // Perform this block's share of work for this tile process_tile( tile_work, block_idx, dp_start_block_idx, block_iter_begin); block_iters_remaining -= tile_work.k_iters_remaining; if (block_iters_remaining == 0) { break; } // Continue to next tile __syncthreads(); if (block_idx >= dp_start_block_idx) { // DP block consume their tiles at stride tile_idx += params.block_mapping.avail_sms; init_dp_tile_work(tile_work, tile_idx); } else { // SK blocks consume their tiles in backwards order tile_idx--; init_sk_tile_work(tile_work, tile_idx, block_iter_begin, block_iter_begin + block_iters_remaining); } } } public: // // Device-only API // // Factory invocation CUTLASS_DEVICE static void invoke( Params const &params, SharedStorage &shared_storage) { GemmWithEpilogueVisitorStreamk op(params, shared_storage); op(); } CUTLASS_DEVICE GemmWithEpilogueVisitorStreamk( Params const &params, SharedStorage &shared_storage) : params(params), shared_storage(shared_storage), thread_idx(threadIdx.x), warp_idx(__shfl_sync(0xffffffff, threadIdx.x / 32, 0)), // broadcast the warp_id computed by lane 0 to ensure dependent code 
lane_idx(threadIdx.x % 32), epilogue( params.output_op, shared_storage.epilogue, thread_idx, warp_idx, lane_idx) {} /// Executes one GEMM CUTLASS_DEVICE void operator()() { // Generic SK code path gemm(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
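As a side note on the workspace logic in the Params structure above: the sketch below replays, on the host, how the stream-K scratch workspace is sized and laid out (partial-accumulator storage first, cache-line-aligned barrier flags after it). The block counts and per-block byte sizes are made-up illustrative values, not numbers produced by a real ThreadblockSwizzle.

// Minimal host-side sketch of the workspace sizing above (illustrative values only).
#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t cacheline_align_up(size_t size) {
  static const int CACHELINE_SIZE = 128;
  return (size + CACHELINE_SIZE - 1) / CACHELINE_SIZE * CACHELINE_SIZE;
}

int main() {
  // Hypothetical stream-K configuration.
  int sk_blocks = 12;                               // stand-in for sk_regions() * sk_blocks_per_region()
  int reduction_blocks = 4;                         // blocks used for separate reduction
  size_t workspace_bytes_per_block = 32 * 1024;     // stand-in for kWorkspaceBytesPerBlock
  size_t barrier_flag_bytes = sizeof(unsigned int); // stand-in for sizeof(Barrier::T)

  // One flag per SK block (atomic reduction) or per reduction block (parallel
  // reduction), whichever is larger -- mirroring get_barrier_workspace_size().
  int num_flags = std::max(sk_blocks, reduction_blocks);
  size_t barrier_bytes  = cacheline_align_up(barrier_flag_bytes * num_flags);
  size_t partials_bytes = cacheline_align_up(workspace_bytes_per_block * sk_blocks);

  // init_workspace() places the partials first and the barrier flags after them.
  std::printf("partials %zu B + barriers %zu B = %zu B total\n",
              partials_bytes, barrier_bytes, partials_bytes + barrier_bytes);
  return 0;
}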
include/cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h/0
{ "file_path": "include/cutlass/gemm/kernel/gemm_universal_with_visitor_streamk.h", "repo_id": "include", "token_count": 11099 }
32
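To make the stream-K assignment performed by gemm() and init_sk_tile_work() above more concrete, the standalone sketch below replays the same arithmetic on the host: given a block's global MAC-iteration range, it derives the per-tile [k_begin, k_end) extents that block covers, visiting tiles in backwards order as the kernel does. All sizes are hypothetical example values, not taken from a real launch.

// Host-side sketch of how a stream-K block's global iteration range is
// translated into per-tile k extents (mirrors init_sk_tile_work above).
// All sizes below are hypothetical example values.
#include <algorithm>
#include <cstdio>

int main() {
  int const kK_per_iter    = 32;   // stand-in for Mma::Shape::kK
  int const problem_k      = 640;  // K extent of the GEMM
  int const iters_per_tile = (problem_k + kK_per_iter - 1) / kK_per_iter; // 20

  // Assume this block was assigned global MAC-iterations [block_iter_begin, block_iter_end).
  int block_iter_begin = 30;
  int block_iter_end   = 55;

  // Walk the tiles this block touches, last tile first (as in gemm()).
  int tile_idx        = (block_iter_end - 1) / iters_per_tile;
  int iters_remaining = block_iter_end - block_iter_begin;
  while (iters_remaining > 0) {
    int tile_iter_begin = tile_idx * iters_per_tile;
    int iter_begin      = std::max(block_iter_begin, tile_iter_begin);
    int k_iter_begin    = iter_begin - tile_iter_begin;
    int k_iter_end      = std::min(block_iter_begin + iters_remaining,
                                   tile_iter_begin + iters_per_tile) - tile_iter_begin;
    int k_begin = k_iter_begin * kK_per_iter;
    int k_end   = std::min(problem_k, k_iter_end * kK_per_iter);
    std::printf("tile %d: k = [%d, %d)\n", tile_idx, k_begin, k_end);
    iters_remaining -= (k_iter_end - k_iter_begin);
    --tile_idx;  // stream-K blocks consume their tiles in backwards order
  }
  return 0;
}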
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/kernel_hardware_info.hpp" #include "cute/arch/cluster_sm90.hpp" #include "cutlass/arch/mma_sm90.h" #include "cutlass/epilogue/collective/detail.hpp" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/gemm/kernel/sm90_tile_scheduler.hpp" #include "cutlass/trace.h" #include "cute/tensor.hpp" /////////////////////////////////////////////////////////////////////////////// namespace cutlass::gemm::kernel { namespace detail { // IF_SWAP_AB<T>::value will be true only if: // class T has member SwapAB and T::SwapAB is true template <typename T, typename = void> struct IF_SWAP_AB { static constexpr bool value = false; }; template <typename T> struct IF_SWAP_AB <T, void_t<decltype(T::SwapAB)>> { static constexpr bool value = T::SwapAB; }; } // namespace /////////////////////////////////////////////////////////////////////////////// template < class ProblemShape_, class CollectiveMainloop_, class CollectiveEpilogue_, class TileScheduler_ > class GemmUniversal< ProblemShape_, CollectiveMainloop_, CollectiveEpilogue_, TileScheduler_, cute::enable_if_t<cute::is_base_of_v<KernelTma, typename CollectiveMainloop_::DispatchPolicy::Schedule>>> { public: // // Type Aliases // using ProblemShape = ProblemShape_; static_assert(cute::rank(ProblemShape{}) == 3 or cute::rank(ProblemShape{}) == 4, "ProblemShape{} should be <M,N,K> or <M,N,K,L>"); // Mainloop derived types using CollectiveMainloop = CollectiveMainloop_; using TileShape = typename CollectiveMainloop::TileShape; using TiledMma = typename CollectiveMainloop::TiledMma; using ArchTag = typename CollectiveMainloop::ArchTag; using ElementA = typename 
CollectiveMainloop::ElementA; using StrideA = typename CollectiveMainloop::StrideA; using ElementB = typename CollectiveMainloop::ElementB; using StrideB = typename CollectiveMainloop::StrideB; using DispatchPolicy = typename CollectiveMainloop::DispatchPolicy; using ElementAccumulator = typename CollectiveMainloop::ElementAccumulator; using ClusterShape = typename DispatchPolicy::ClusterShape; using MainloopArguments = typename CollectiveMainloop::Arguments; using MainloopParams = typename CollectiveMainloop::Params; static_assert(ArchTag::kMinComputeCapability >= 90); // Epilogue derived types using CollectiveEpilogue = CollectiveEpilogue_; using ElementC = typename CollectiveEpilogue::ElementC; using StrideC = typename CollectiveEpilogue::StrideC; using ElementD = typename CollectiveEpilogue::ElementD; using StrideD = typename CollectiveEpilogue::StrideD; using EpilogueArguments = typename CollectiveEpilogue::Arguments; using EpilogueParams = typename CollectiveEpilogue::Params; static_assert(cute::is_same_v<ElementAccumulator, typename CollectiveEpilogue::ElementAccumulator>, "Mainloop and epilogue do not agree on accumulator value type."); static_assert(cute::is_void_v<TileScheduler_> or cute::is_same_v<TileScheduler_, PersistentScheduler>, "TMA kernel does not support specializing the tile scheduler."); using TileSchedulerTag = TileScheduler_; using TileScheduler = typename detail::TileSchedulerSelector< TileScheduler_, ArchTag, TileShape, ClusterShape>::Scheduler; using TileSchedulerArguments = typename TileScheduler::Arguments; static constexpr int SharedStorageSize = static_cast<int>(cute::max( sizeof(typename CollectiveMainloop::SharedStorage), sizeof(typename CollectiveEpilogue::SharedStorage))); static constexpr uint32_t MaxThreadsPerBlock = CollectiveMainloop::ThreadCount; static constexpr uint32_t MinBlocksPerMultiprocessor = 1; // Device side arguments struct Arguments { GemmUniversalMode mode{}; ProblemShape problem_shape{}; MainloopArguments mainloop{}; EpilogueArguments epilogue{}; KernelHardwareInfo hw_info{}; TileSchedulerArguments scheduler{}; }; // Kernel entry point API struct Params { GemmUniversalMode mode{}; ProblemShape problem_shape{}; MainloopParams mainloop{}; EpilogueParams epilogue{}; }; // // Methods // // Convert to underlying arguments. In this case, a simple copy for the aliased type. 
static Params to_underlying_arguments(Arguments const& args, void* workspace) { (void) workspace; auto problem_shape = args.problem_shape; if constexpr (detail::IF_SWAP_AB<CollectiveMainloop>::value) { // swap M/N get<0>(problem_shape) = get<1>(args.problem_shape); get<1>(problem_shape) = get<0>(args.problem_shape); } return { args.mode, problem_shape, CollectiveMainloop::to_underlying_arguments(args.problem_shape, args.mainloop, workspace), CollectiveEpilogue::to_underlying_arguments(args.problem_shape, args.epilogue, workspace) }; } CUTLASS_HOST_DEVICE static bool can_implement(Arguments const& args) { bool implementable = (args.mode == GemmUniversalMode::kGemm) or (args.mode == GemmUniversalMode::kBatched && cute::rank(ProblemShape{}) == 4); if (!implementable) { CUTLASS_TRACE_HOST(" CAN IMPLEMENT: Arguments or Problem Shape don't meet the requirements.\n"); return implementable; } implementable &= CollectiveMainloop::can_implement(args.problem_shape, args.mainloop); implementable &= CollectiveEpilogue::can_implement(args.problem_shape, args.epilogue); implementable &= TileScheduler::can_implement(args.scheduler); return implementable; } static size_t get_workspace_size(Arguments const& args) { return 0; } static cutlass::Status initialize_workspace(Arguments const& args, void* workspace = nullptr, cudaStream_t stream = nullptr, CudaHostAdapter* cuda_adapter = nullptr) { return Status::kSuccess; } // Computes the kernel launch grid shape based on runtime parameters static dim3 get_grid_shape(Params const& params) { auto cluster_shape = ClusterShape{}; auto tile_shape = TileShape{}; auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); return TileScheduler::get_tiled_cta_shape_mnl( problem_shape_MNKL, tile_shape, cluster_shape); } static dim3 get_block_shape() { return dim3(MaxThreadsPerBlock, 1, 1); } CUTLASS_DEVICE void operator()(Params const& params, char* smem_buf) { using namespace cute; using X = Underscore; // Any Tensor Op MMA Atom in the WGMMA ISA is arch conditional to sm90a. #if ! defined(__CUDA_ARCH_FEAT_SM90_ALL) printf("ERROR : Arch conditional MMA instruction used without targeting sm90a compute capability. Aborting.\n"); #else // Preconditions static_assert(cute::rank(StrideA{}) == 3, "StrideA must be rank-3: [M, K, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(StrideB{}) == 3, "StrideB must be rank-3: [N, K, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(StrideC{}) == 3, "StrideC must be rank-3: [M, N, L]. If batch mode is not needed, set L stride to Int<0>."); static_assert(cute::rank(StrideD{}) == 3, "StrideD must be rank-3: [M, N, L]. 
If batch mode is not needed, set L stride to Int<0>."); int thread_idx = int(threadIdx.x); int warp_idx = canonical_warp_idx_sync(); int lane_predicate = cute::elect_one_sync(); uint32_t block_rank_in_cluster = cute::block_rank_in_cluster(); // Issue Tma Descriptor Prefetch from a single thread if ((warp_idx == 0) && lane_predicate) { CollectiveMainloop::prefetch_tma_descriptors(params.mainloop); } // Separate out problem shape for convenience // Optionally append 1s until problem shape is rank-4 in case its is only rank-3 (MNK) auto problem_shape_MNKL = append<4>(params.problem_shape, Int<1>{}); auto M = get<0>(problem_shape_MNKL); auto N = get<1>(problem_shape_MNKL); auto K = get<2>(problem_shape_MNKL); auto L = get<3>(problem_shape_MNKL); // TMA requires special handling of strides to deal with coord codomain mapping // Represent the full tensors -- get these from TMA Tensor mA_mkl = params.mainloop.tma_load_a.get_tma_tensor(make_shape(M,K,L)); // (m,k,l) Tensor mB_nkl = params.mainloop.tma_load_b.get_tma_tensor(make_shape(N,K,L)); // (n,k,l) // Get the appropriate blocks for this thread block -- potential for thread block locality auto blk_shape = TileShape{}; // (BLK_M,BLK_N,BLK_K) auto blk_coord = make_coord(_,_,_); // (m,n,k) -- defer the slice // Make tiled views Tensor gA_mkl = local_tile(mA_mkl, blk_shape, blk_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,m,k,l) Tensor gB_nkl = local_tile(mB_nkl, blk_shape, blk_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,n,k,l) // Compute m_coord, n_coord, and l_coord with their post-tiled shapes auto m_coord = idx2crd(int(blockIdx.x), shape<2>(gA_mkl)); auto n_coord = idx2crd(int(blockIdx.y), shape<2>(gB_nkl)); auto l_coord = idx2crd(int(blockIdx.z), shape<4>(gB_nkl)); auto output_tile_coord = make_coord(m_coord, n_coord, _, l_coord); // Slice with m_coord and n_coord Tensor gA = gA_mkl(_,_,m_coord,_,l_coord); // (BLK_M,BLK_K,k) Tensor gB = gB_nkl(_,_,n_coord,_,l_coord); // (BLK_N,BLK_K,k) // Allocate the tiled_mma and the accumulators for the (M,N) blk_shape TiledMma tiled_mma; Tensor accumulators = partition_fragment_C(tiled_mma, take<0,2>(blk_shape)); // (MMA,MMA_M,MMA_N) auto k_tile_iter = cute::make_coord_iterator(shape<2>(gA)); auto k_tile_count = size<2>(gA); // Perform the collective scoped MMA CollectiveMainloop collective_mma; collective_mma( gA, params.mainloop.tma_load_a, gB, params.mainloop.tma_load_b, accumulators, k_tile_iter, k_tile_count, thread_idx, block_rank_in_cluster, smem_buf, params.mainloop ); constexpr int BLK_M_RANK = cute::rank<0>(blk_shape); bool m_oob = int(blockIdx.x) >= size<2>(gA_mkl); auto m_max_coord = unwrap(cute::transform(make_seq<BLK_M_RANK>{}, [&](auto i) { return m_oob ? 0 : get<i>(M) - get<0,i>(blk_shape) * get<i>(m_coord); })); constexpr int BLK_N_RANK = cute::rank<1>(blk_shape); bool n_oob = int(blockIdx.y) >= size<2>(gB_nkl); auto n_max_coord = unwrap(cute::transform(make_seq<BLK_N_RANK>{}, [&](auto i) { return n_oob ? 0 : get<i>(N) - get<1,i>(blk_shape) * get<i>(n_coord); })); auto residue_mnk = make_tuple(m_max_coord, n_max_coord, Int<0>{}); // Epilogue and write to gD CollectiveEpilogue epilogue{params.epilogue}; epilogue( problem_shape_MNKL, blk_shape, output_tile_coord, accumulators, tiled_mma, residue_mnk, thread_idx, smem_buf ); #endif } }; /////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::gemm::kernel
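The detail::IF_SWAP_AB trait defined earlier in this header relies on the standard void_t member-detection idiom. The toy program below (C++17, with hypothetical Mainloop types) shows the same pattern in isolation: the primary template yields false, and the partial specialization is selected only when T::SwapAB is well-formed.

// Standalone illustration of the detection idiom behind detail::IF_SWAP_AB.
// The Mainloop types here are hypothetical stand-ins.
#include <cstdio>
#include <type_traits>

template <typename T, typename = void>
struct IF_SWAP_AB { static constexpr bool value = false; };

template <typename T>
struct IF_SWAP_AB<T, std::void_t<decltype(T::SwapAB)>> {
  static constexpr bool value = T::SwapAB;
};

struct MainloopNoSwap { };                                      // no SwapAB member
struct MainloopSwap   { static constexpr bool SwapAB = true; }; // opts in to swapping

int main() {
  static_assert(!IF_SWAP_AB<MainloopNoSwap>::value, "primary template: false");
  static_assert( IF_SWAP_AB<MainloopSwap>::value,   "specialization reads T::SwapAB");
  std::printf("NoSwap=%d Swap=%d\n",
              int(IF_SWAP_AB<MainloopNoSwap>::value),
              int(IF_SWAP_AB<MainloopSwap>::value));
  return 0;
}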
include/cutlass/gemm/kernel/sm90_gemm_tma.hpp/0
{ "file_path": "include/cutlass/gemm/kernel/sm90_gemm_tma.hpp", "repo_id": "include", "token_count": 4951 }
33
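Before the SM90 tile-scheduler parameters that follow, a quick host-side model of the tiling arithmetic they build on may help: CTA counts come from ceiling division of the problem extents by the CTA tile shape, and are then rounded up to whole clusters, as get_tiled_cta_shape_mnl does below. The problem, tile, and cluster shapes used here are arbitrary example values.

// Host-side sketch of the CTA/cluster tiling arithmetic used by the SM90
// persistent tile schedulers below. Shapes are arbitrary example values.
#include <cstdio>

struct Dim3 { unsigned x, y, z; };

static unsigned ceil_div(unsigned a, unsigned b) { return (a + b - 1) / b; }
static unsigned round_up(unsigned a, unsigned b) { return ceil_div(a, b) * b; }

int main() {
  // Example GEMM extents and shapes.
  unsigned M = 4096, N = 4096, L = 2;     // problem shape (L = batch)
  unsigned tile_m = 128, tile_n = 128;    // CTA tile shape
  unsigned cluster_m = 2, cluster_n = 1;  // thread block cluster shape

  // CTAs needed to cover the output, then rounded up to whole clusters.
  unsigned cta_m = ceil_div(M, tile_m);
  unsigned cta_n = ceil_div(N, tile_n);
  Dim3 problem_blocks{ round_up(cta_m, cluster_m), round_up(cta_n, cluster_n), L };

  std::printf("CTA grid before persistent scheduling: %u x %u x %u\n",
              problem_blocks.x, problem_blocks.y, problem_blocks.z);
  return 0;
}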
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once /*! \file \brief Parameters structures for persistent tile schedulers */ #include "cutlass/coord.h" #include "cutlass/kernel_hardware_info.h" #include "cutlass/workspace.h" #include "cutlass/platform/platform.h" #include "cutlass/fast_math.h" #include "cutlass/gemm_coord.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { namespace detail { //////////////////////////////////////////////////////////////////////////////// // // Parameters for SM90 tile schedulers // // Parameters for SM90 persistent tile scheduler struct PersistentTileSchedulerSm90Params { enum class RasterOrder { AlongM, AlongN }; enum class RasterOrderOptions { Heuristic, AlongM, AlongN }; FastDivmodU64Pow2 divmod_cluster_shape_major_{}; FastDivmodU64Pow2 divmod_cluster_shape_minor_{}; FastDivmodU64 divmod_batch_{}; FastDivmodU64 divmod_cluster_blk_major_{}; uint64_t blocks_per_problem_ = 0; int32_t log_swizzle_size_ = 0; RasterOrder raster_order_ = RasterOrder::AlongN; uint32_t problem_tiles_m_ = 0; uint32_t problem_tiles_n_ = 0; uint32_t problem_tiles_l_ = 0; uint32_t cluster_shape_m_ = 0; uint32_t cluster_shape_n_ = 0; // Initializes members. This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1. 
void initialize( BatchedGemmCoord problem_shape, GemmCoord tile_shape, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option ) { dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape, tile_shape, cluster_shape); return initialize( problem_blocks, cluster_shape, hw_info, max_swizzle_size, raster_order_option ); } // Version of initialize that takes in as input the number of CTAs in the M and N and L dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. void initialize( dim3 problem_blocks, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option ) { CUTLASS_UNUSED(hw_info); // Round up to nearest multiple of swizzle_size along each mode auto log_swizzle_size = get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle_size); auto problem_blocks_m = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); auto problem_blocks_n = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); problem_tiles_m_ = problem_blocks_m / cluster_shape.m(); problem_tiles_n_ = problem_blocks_n / cluster_shape.n(); problem_tiles_l_ = problem_blocks.z; cluster_shape_m_ = cluster_shape.m(); cluster_shape_n_ = cluster_shape.n(); RasterOrder raster_order = get_rasterization_order( problem_blocks_m, problem_blocks_n, raster_order_option ); // // Set members // blocks_per_problem_ = problem_blocks_m * problem_blocks_n * problem_blocks.z; log_swizzle_size_ = log_swizzle_size; raster_order_ = raster_order; divmod_batch_ = FastDivmodU64(problem_blocks_m * problem_blocks_n); if (raster_order == RasterOrder::AlongN) { divmod_cluster_shape_major_ = FastDivmodU64Pow2(cluster_shape.n()); divmod_cluster_shape_minor_ = FastDivmodU64Pow2(cluster_shape.m()); divmod_cluster_blk_major_ = FastDivmodU64(problem_blocks_n / cluster_shape.n()); } else { divmod_cluster_shape_major_ = FastDivmodU64Pow2(cluster_shape.m()); divmod_cluster_shape_minor_ = FastDivmodU64Pow2(cluster_shape.n()); divmod_cluster_blk_major_ = FastDivmodU64(problem_blocks_m / cluster_shape.m()); } } // Given the inputs, computes the physical grid we should launch. // This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1. CUTLASS_HOST_DEVICE static dim3 get_grid_shape( BatchedGemmCoord problem_shape, GemmCoord cta_shape, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option, bool truncate_by_problem_size=true) { dim3 problem_blocks = get_tiled_cta_shape_mnl(problem_shape, cta_shape, cluster_shape); return get_grid_shape( problem_blocks, cluster_shape, hw_info, max_swizzle_size, raster_order_option, truncate_by_problem_size ); } // Version of get_grid_shape that takes in as input the number of CTAs in the M and N and L dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. 
CUTLASS_HOST_DEVICE static dim3 get_grid_shape( dim3 problem_blocks, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option, bool truncate_by_problem_size=true) { int const sm_count = hw_info.sm_count; // Round up to nearest multiple of swizzle_size along each mode auto log_swizzle_size = get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle_size); auto problem_blocks_m = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); auto problem_blocks_n = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); int problem_blocks_total = problem_blocks_m * problem_blocks_n * problem_blocks.z; RasterOrder raster_order = get_rasterization_order( problem_blocks_m, problem_blocks_n, raster_order_option ); dim3 launch_grid; if (raster_order == RasterOrder::AlongN) { launch_grid = dim3(cluster_shape.m(), 1, 1); } else { launch_grid = dim3(1, cluster_shape.n(), 1); } auto possibly_truncate = [&](int x, int y) { if (truncate_by_problem_size) { return platform::min(x, y); } else { return x; } }; // The else path is generic, however, we can avoid some divs if we know cluster size is 1 auto cluster_size = cluster_shape.m() * cluster_shape.n(); if (cluster_size == 1) { if (raster_order == RasterOrder::AlongN) { launch_grid.y = possibly_truncate(sm_count, problem_blocks_total); } else { launch_grid.x = possibly_truncate(sm_count, problem_blocks_total); } } else { /* * Optimal grid size calculation is based on * GH100: 8 GPCs, 72 TPCs (9 TPCs/GPC), 2 SMs/TPC, 144 SMs per full GPU * Hence, maximum SMs per GPC = 18 */ constexpr int max_sm_per_gpc = 18; // Provided SM count could possibly be less than the assumed maximum SMs per GPC auto cluster_size = cluster_shape.m() * cluster_shape.n(); int const min_num_gpc = sm_count < max_sm_per_gpc ? 1 : sm_count / max_sm_per_gpc; int const max_cta_occupancy_per_gpc = max_sm_per_gpc - (max_sm_per_gpc % cluster_size); int cta_per_device = min_num_gpc * max_cta_occupancy_per_gpc; // The calculation below allows for larger grid size launch for different GPUs. int const num_gpc_residual = sm_count < max_sm_per_gpc ? 0 : sm_count % max_sm_per_gpc; int const max_cta_occupancy_per_residual_gpc = num_gpc_residual - (num_gpc_residual % cluster_size); cta_per_device += max_cta_occupancy_per_residual_gpc; cta_per_device = sm_count < cta_per_device ? 
sm_count : cta_per_device; if (raster_order == RasterOrder::AlongN) { launch_grid.y = possibly_truncate( cta_per_device / cluster_shape.m(), problem_blocks_total / cluster_shape.m()); } else { launch_grid.x = possibly_truncate( cta_per_device / cluster_shape.n(), problem_blocks_total / cluster_shape.n()); } } return launch_grid; } CUTLASS_HOST_DEVICE static int32_t get_log_swizzle_size(int problem_ctas_m, int problem_ctas_n, int max_swizzle_size) { int min_cta_dim = platform::min(problem_ctas_m, problem_ctas_n); if (max_swizzle_size >= 8 && min_cta_dim >= 6) { return 3; } else if (max_swizzle_size >= 4 && min_cta_dim >= 3) { return 2; } else if (max_swizzle_size >= 2 && min_cta_dim >= 2) { return 1; } else { return 0; } } CUTLASS_HOST_DEVICE static RasterOrder get_rasterization_order( uint32_t tiles_m, uint32_t tiles_n, RasterOrderOptions raster_order_option ) { if (raster_order_option == RasterOrderOptions::Heuristic) { if (tiles_n > tiles_m) { return RasterOrder::AlongM; } else { return RasterOrder::AlongN; } } else { switch (raster_order_option) { case RasterOrderOptions::AlongN: return RasterOrder::AlongN; break; default: return RasterOrder::AlongM; } } } // Get the number of CTA tiles in this problem. This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1. CUTLASS_HOST_DEVICE static dim3 get_tiled_cta_shape_mnl(BatchedGemmCoord problem_shape, GemmCoord cta_shape, GemmCoord cluster_shape) { auto cta_m = (problem_shape.m() + cta_shape.m() - 1) / cta_shape.m(); auto cta_n = (problem_shape.n() + cta_shape.n() - 1) / cta_shape.n(); return get_tiled_cta_shape_mnl(problem_shape, cluster_shape, cta_m, cta_n); } // Version of get_tiled_cta_shape_mnl that takes in as input the number of CTAs in the M and N dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. CUTLASS_HOST_DEVICE static dim3 get_tiled_cta_shape_mnl(BatchedGemmCoord problem_shape, GemmCoord cluster_shape, uint32_t cta_m, uint32_t cta_n) { // Round up to nearest multiple of cluster dim along each mode auto problem_blocks_m = ((cta_m + cluster_shape.m() - 1) / cluster_shape.m()) * cluster_shape.m(); auto problem_blocks_n = ((cta_n + cluster_shape.n() - 1) / cluster_shape.n()) * cluster_shape.n(); return { static_cast<uint32_t>(problem_blocks_m), static_cast<uint32_t>(problem_blocks_n), static_cast<uint32_t>(problem_shape.batch()) }; } }; //////////////////////////////////////////////////////////////////////////////// // Parameters for SM90 persistent stream-K scheduler struct PersistentTileSchedulerSm90StreamKParams { // Strategies for computing reductions between CTAs computing portions of a given output tile enum class ReductionMode { // Participating CTAs perform reduction in a turnstile fashion in order of the K extent // covered by each CTA. This requires a lock to be held exclusively be the CTA that is // currently accumulating. // // Turnstile accumulation ensures deterministic numeric behavior when using this mode. Deterministic, // Participating CTAs perform reduction atomically to the same workspace (mostly) without locking. // Locks are used only to wait for the first CTA to write its partial values (to initialize the // workspace), and for all but the final CTA to have accumulated (so that the final CTA can load // the accumulated value and accumulate it into registers on top of which the epilogue will // be performed). 
// // Due to the nondeterminsitic ordering of accumulation, deterministic numeric behavior cannot // be guaranteed with this mode (e.g., floating-point rounding error will depend on the order // of accumulation) Nondeterministic }; // Strategies for decomposing the problem enum class DecompositionMode { // Use a heuristic to determine whether data-parallel, split-K, or stream-K decomposition should be performed Heuristic, // Force a data-parallel decomposition DataParallel, // Force a split-K decomposition. This should be paired with setting the `splits` parameter SplitK, // Force a stream-K decomposition StreamK }; using UnderlyingParams = PersistentTileSchedulerSm90Params; using RasterOrder = UnderlyingParams::RasterOrder; using RasterOrderOptions = UnderlyingParams::RasterOrderOptions; // Cluster dimensions are typically always a power of 2, so use // the power-of-two variants of FastDivmod for these. FastDivmodU64Pow2 divmod_cluster_shape_major_{}; FastDivmodU64Pow2 divmod_cluster_shape_minor_{}; FastDivmodU64 divmod_batch_{}; FastDivmodU64 divmod_cluster_blk_major_{}; // Total number of cluster-sized output tiles (i.e., not including any // splitting factors). This is primarily used for split-K decompositions, // and may be overridden in other decompositions. FastDivmodU64 divmod_clusters_mnl_{}; // We divide up the number of stream-K tiles amongst G groups of stream-K units. // The stream-K units within a group collaborate to comptue over the `sk_tiles / G` // tiles assigned to that group. Non-unit group sizes can help to preserve L2 locality of // partial chunks computed by stream-K units -- units 0 in each group will compute identical K extents // of tiles that would be assigned in the same wave according to the rasterization order of the // data-parallel formulation of the problem. FastDivmodU64 divmod_sk_groups_{}; // Number of stream-K units in each group FastDivmodU64 divmod_sk_units_per_group_{}; uint64_t units_per_problem_ = 0; FastDivmod divmod_tiles_per_output_tile_{}; int32_t log_swizzle_size_ = 0; RasterOrder raster_order_ = RasterOrder::AlongN; // The splitting factor to be used in a split-K decomposition of the problem. // If this is set to a value greater than 1, stream-K decomposition logic // is bypassed in favor of a split-K decomposition. uint32_t splits_ = 1; // Number of stream-K or split-K work units that compute an extra k iteration. // This is done to handle residuals in dividing up the k iteration space. // For stream-K, since the actual assignment of work to stream-K units will be done // at the granularity of a cluster, we store only the number of big clusters. uint32_t big_units_ = 0; // The number of groups of stream-K units that will process an extra stream-K tile cluster. uint32_t big_groups_ = 0; // Workspace for holding partial accumulators to be reduced across stream-K/split-K units void* reduction_workspace_ = nullptr; // Number of tiles covered by stream-K work units uint32_t sk_tiles_ = 0; // Number of work units computing stream-K tiles uint32_t sk_units_ = 0; // Number of tiled k iterations computed by each stream-K work unit. This // can potentially cover more than one output tile. 
uint32_t k_tiles_per_sk_unit_ = 0; // Strategy to use when reducing between collaborating CTAs ReductionMode reduction_mode_ = ReductionMode::Deterministic; // The number of sub blocks in the kernel epilogue FastDivmodU64 divmod_epilogue_subtile_{}; // The number of blocks that launched for doing separate reduction uint32_t separate_reduction_units_ = 0; // Minimum number of k tiles that can be assigned to a stream-K unit static constexpr uint32_t min_iters_per_sk_unit_ = 8u; // Maximum number of groups of stream-K units static constexpr uint32_t max_sk_groups_ = 8u; // Divides dividend by the cluster size CUTLASS_HOST_DEVICE uint64_t div_cluster_size(uint64_t dividend) const { // Use each underlying fast divmod rather than performing integer division // by the multiplication of major.divisor * minor.divisor return divmod_cluster_shape_minor_.divide( divmod_cluster_shape_major_.divide(dividend) ); } CUTLASS_HOST_DEVICE uint64_t get_cluster_size() const { return divmod_cluster_shape_minor_.divisor * divmod_cluster_shape_major_.divisor; } // Returns whether the kernel uses separate reduction CUTLASS_HOST_DEVICE bool requires_separate_reduction() const { return separate_reduction_units_ > 0; } // Returns the maximum number of peers that can collaborate on a given output tile CUTLASS_HOST_DEVICE static uint32_t max_peers_per_tile(uint64_t sk_units, uint64_t sk_tiles) { // When we can divide up our SK units to SK tiles evenly, the number of peers // per SK tile is exactly (sk_units_ / sk_tiles_). In cases where this division // is not exact, some tiles will need to be covered by additional SK units. Because // the extra work can occur at both the beginning and the end of the SK tile, at // most 2 extra peers will be needed. return static_cast<uint32_t>(sk_units / sk_tiles + 2); } // Initializes members. This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1. void initialize( BatchedGemmCoord problem_shape, GemmCoord tile_shape, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, ReductionMode reduction_mode, DecompositionMode decomposition_mode, void* workspace, const uint32_t epilogue_subtile = 1 ) { dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl( problem_shape, tile_shape, cluster_shape); // Number of k tiles in each output tile uint32_t k_tiles_per_output_tile = (problem_shape.k() + tile_shape.k() - 1) / tile_shape.k(); initialize( problem_blocks, k_tiles_per_output_tile, cluster_shape, hw_info, splits, max_swizzle, raster_order_option, reduction_mode, decomposition_mode, workspace, epilogue_subtile ); } // Version of initialize that takes in as input the number of CTAs in the M and N and L dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. 
void initialize( dim3 problem_blocks, uint32_t k_tiles_per_output_tile, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, ReductionMode reduction_mode, DecompositionMode decomposition_mode, void* workspace, const uint32_t epilogue_subtile = 1 ) { UnderlyingParams underlying_params; underlying_params.initialize( problem_blocks, cluster_shape, hw_info, max_swizzle, raster_order_option ); auto problem_blocks_l = problem_blocks.z; auto problem_blocks_m = round_up(problem_blocks.x, (1 << underlying_params.log_swizzle_size_) * cluster_shape.m()); auto problem_blocks_n = round_up(problem_blocks.y, (1 << underlying_params.log_swizzle_size_) * cluster_shape.n()); uint64_t output_tiles = problem_blocks_m * problem_blocks_n * problem_blocks_l; // Reduction workspace is at the beginning of the workspace. Lock workspace follows. void* reduction_workspace = workspace; if (decomposition_mode == DecompositionMode::SplitK || (decomposition_mode == DecompositionMode::Heuristic && splits > 1)) { // Short circuit to basic split-K decomposition // Don't split by more than the available number of SMs if (splits > hw_info.sm_count) { splits = hw_info.sm_count; } // Don't split by more than the K tile iterations // // splits is almost certainly nonnegative here (e.g., hw_info.sm_count, // despite being an int, is a count), so it can safely be converted to unsigned // in the comparison to avoid a signed-unsigned comparison warning-as-error. if (static_cast<decltype(k_tiles_per_output_tile)>(splits) > k_tiles_per_output_tile) { splits = k_tiles_per_output_tile; } set_params_basic( underlying_params, problem_blocks_m, problem_blocks_n, problem_blocks_l, splits, k_tiles_per_output_tile, reduction_workspace, reduction_mode ); return; } // Calculate the maximum number of blocks from clusters of shape cluster_shape that we // can fit within sm_count SMs. dim3 grid = get_grid_shape( problem_blocks, cluster_shape, hw_info, max_swizzle, raster_order_option ); uint64_t ctas_per_wave = grid.x * grid.y; auto cluster_size = cluster_shape.m() * cluster_shape.n(); // The number of output tiles to be computed in stream-K and data-parallel fashion, respectively. uint32_t sk_tiles = get_num_sk_tiles( output_tiles, ctas_per_wave, cluster_size, k_tiles_per_output_tile, decomposition_mode ); uint64_t dp_tiles = output_tiles - sk_tiles; // Calculate the number of work units covering the data-parallel and stream-K tiles. // A "work unit" is a single index in the linearized ID space used by the scheduler. // We distinguish it from a "block," which is typically tied to a hardware unit // (e.g., the callers into this scheduler will be persistent thread blocks). // A work unit can encompass multiple output tiles worth of work (as will be the // case for stream-K blocks). // Since splitting is not required for data-parallel tiles, only one data-parallel unit // is needed per data-parallel tile. 
uint64_t dp_units = dp_tiles; uint64_t ctas_per_sk_wave = ctas_per_wave; uint64_t sk_units = get_num_sk_units(cluster_shape, ctas_per_sk_wave, sk_tiles, k_tiles_per_output_tile); if (decomposition_mode == DecompositionMode::DataParallel || (decomposition_mode == DecompositionMode::Heuristic && sk_tiles == 0) || sk_units == 0) { // Short circuit to basic data-parallel decomposition set_params_basic( underlying_params, problem_blocks_m, problem_blocks_n, problem_blocks_l, /* splits = */ 1, k_tiles_per_output_tile, reduction_workspace, reduction_mode ); return; } bool do_separate_reduction = should_perform_separate_reduction( epilogue_subtile, sk_units, sk_tiles, dp_tiles, ctas_per_wave); // Determine the number of stream-K groups that will be used. We currently use // max_sk_groups_ unless this extends beyond the extent of the dimension over // which the problem is rasterized. For example, if the tiled problem shape // (in CTA_M x CTA_N representation) when using 1x1 clusters is 4x16, // and we rasterize along the M dimension, we choose 4 groups, rather than 8. // If the cluster shape is 2x1, we choose 2 groups (CTA_M / CLUSTER_M). uint32_t max_groups_problem; if (underlying_params.raster_order_ == RasterOrder::AlongM) { max_groups_problem = problem_blocks_m / cluster_shape.m(); } else { max_groups_problem = problem_blocks_n / cluster_shape.n(); } // Select the number of groups that will be use. We start with the maximum // number of potential groups, and iterate down looking for a group size that // evenly divides the stream-K units and tiles, and for which the resulting // number of K tiles per stream-K unit remains above min_iters_per_sk_unit_ uint32_t groups = platform::min(max_groups_problem, uint32_t(max_sk_groups_)); // Grouping is disabled when separate reduction is used if (do_separate_reduction) { groups = 1; } uint32_t fallback_groups = 0; auto sk_cluster_tiles = sk_tiles / cluster_size; auto sk_cluster_units = sk_units / cluster_size; auto sk_splits_too_small = [&](uint32_t g) { // Check whether the number of K tiles computed per stream-K unit is less // than min_iters_per_sk_unit_ auto total_sk_k_tiles = (sk_tiles / g) * k_tiles_per_output_tile; auto k_tiles_per_sk_unit = total_sk_k_tiles / (sk_units / g); return k_tiles_per_sk_unit < min_iters_per_sk_unit_; }; auto is_ideal_grouping = [&](uint32_t g) { // An ideal grouping will evenly divide stream-K clusters, evenly divide // stream-K tiles, and not result in stream-K splits that are too small. return (sk_cluster_units % g == 0) && (sk_cluster_tiles % g == 0) && !sk_splits_too_small(g); }; auto is_valid_grouping = [&](uint32_t g) { // A grouping is valid, but not ideal, if it evenly divides the // stream-K clusters and does not result in stream-K splits that are // too small. Such a setting can be used as a fallback option in the // case that an ideal grouping is not achievable return sk_cluster_units % g == 0 && !sk_splits_too_small(g); }; while (groups > 1 && !is_ideal_grouping(groups)) { if (fallback_groups == 0 && is_valid_grouping(groups)) { // Set fallback groups once in preference for a larger number of groups. fallback_groups = groups; } --groups; } // If groups == 1, we did not find a group count that satisfies all criteria. If we have // found a fallback group count, use this instead. if (groups == 1 && fallback_groups > 0) { groups = fallback_groups; } auto sk_units_per_group = sk_units / groups; // sk_tiles is guaranteed to be divisible by cluster_size because it is calculated as: // sk_tiles = (waves <= 2) ? 
total_tiles : (sm_count + (total_tiles % sm_count)) // Both total_tiles and sm_count are multiples of cluster size due to padding added // prior to kernel launch. uint64_t sk_clustered_tiles = sk_tiles / cluster_size; uint64_t sk_clustered_tiles_per_group = sk_clustered_tiles / groups; uint64_t sk_tiles_per_group = sk_clustered_tiles_per_group * cluster_size; // Groups that will process an extra stream-K tile cluster. These differ from "big_units," which // are stream-K units within a group that process an extra K chunk. uint64_t sk_big_groups = sk_clustered_tiles % groups; uint64_t k_tiles_per_group = k_tiles_per_output_tile * sk_tiles_per_group; // Number of k tiles computed per stream-K unit uint64_t k_tiles_per_sk_unit = k_tiles_per_group / sk_units_per_group; uint32_t reduction_units = 0; // Use separate reduction when we have less than one wave of output tiles (dp_tiles == 0) // and when each tile will be operated on by at least two stream-K units (sk_units > 2 * sk_tiles) if (do_separate_reduction) { // Each reduction unit will reduce the partials of an epilogue subtile for // a given output tile and compute the epilogue. Thus, there are as many reduction // units as there are epilogue subtiles. reduction_units = sk_tiles * epilogue_subtile; } else if (decomposition_mode == DecompositionMode::Heuristic && sk_tiles < sk_units && sk_units % sk_tiles == 0) { // If the number of stream-K units is a multiple of the number of stream-K tiles, then // the problem can leverage a basic split-K decomposition for the stream-K tiles. // This case happens when separate reduction is disable. uint32_t sk_splits = static_cast<uint32_t>(sk_units / sk_tiles); set_params_basic( underlying_params, problem_blocks_m, problem_blocks_n, problem_blocks_l, sk_splits, k_tiles_per_output_tile, reduction_workspace, reduction_mode ); return; } divmod_cluster_shape_major_ = underlying_params.divmod_cluster_shape_major_; divmod_cluster_shape_minor_ = underlying_params.divmod_cluster_shape_minor_; divmod_batch_ = underlying_params.divmod_batch_; divmod_tiles_per_output_tile_ = FastDivmod(k_tiles_per_output_tile); divmod_cluster_blk_major_ = underlying_params.divmod_cluster_blk_major_; divmod_sk_groups_ = FastDivmodU64(static_cast<uint64_t>(groups)); divmod_sk_units_per_group_ = FastDivmodU64(static_cast<uint64_t>(sk_units / groups)); // Override divmod_clusters_mnl_ to be the number of cluster-sized stream-K units. // This setting ensures that the use of this divmod for stream-K decompositions // is essentially a no-op. divmod_clusters_mnl_ = FastDivmodU64(sk_units / cluster_size); splits_ = 1; log_swizzle_size_ = underlying_params.log_swizzle_size_; units_per_problem_ = static_cast<uint32_t>(dp_units + sk_units); raster_order_ = underlying_params.raster_order_; // Assign big_units_ assuming that group count == 1. This is unused by stream-K // when group count > 1. big_units_ = static_cast<uint32_t>(k_tiles_per_group % k_tiles_per_sk_unit); big_groups_ = static_cast<uint32_t>(sk_big_groups); reduction_workspace_ = reduction_workspace; sk_tiles_ = sk_tiles; sk_units_ = static_cast<uint32_t>(sk_units); k_tiles_per_sk_unit_ = static_cast<uint32_t>(k_tiles_per_sk_unit); reduction_mode_ = reduction_mode; divmod_epilogue_subtile_ = FastDivmodU64(epilogue_subtile); separate_reduction_units_ = reduction_units; } // Given the inputs, computes the physical grid we should launch. // This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1. 
CUTLASS_HOST_DEVICE static dim3 get_grid_shape( BatchedGemmCoord problem_shape, GemmCoord cta_shape, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option ) { dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl(problem_shape, cta_shape, cluster_shape); return get_grid_shape( problem_blocks, cluster_shape, hw_info, max_swizzle_size, raster_order_option ); } // Version of get_grid_shape that takes in as input the number of CTAs in the M and N and L dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. CUTLASS_HOST_DEVICE static dim3 get_grid_shape( dim3 problem_blocks, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option ) { // Call into the underlying get_grid_shape method, but do not allow the grid shape returned // to be truncated based on the number of output tiles in the problem. return UnderlyingParams::get_grid_shape( problem_blocks, cluster_shape, hw_info, max_swizzle_size, raster_order_option, /* truncate_by_problem_size = */false ); } // Returns the number of stream-K tiles that will be computed amongst `output_tiles` total // output tiles on a device with `ctas_per_wave` CTAs in each wave. static uint32_t get_num_sk_tiles( uint64_t output_tiles, uint64_t ctas_per_wave, uint64_t cluster_size, uint32_t k_tiles_per_output_tile, DecompositionMode decomposition_mode ) { uint32_t full_waves = static_cast<uint32_t>(output_tiles / ctas_per_wave); uint32_t total_waves = static_cast<uint32_t>((output_tiles + ctas_per_wave - 1) / ctas_per_wave); if (decomposition_mode == DecompositionMode::DataParallel || decomposition_mode == DecompositionMode::SplitK) { return 0; } // If there is wave quantization, assign the first two waves worth of tiles to be // covered by stream-K work and the remainder to be data-parallel. Since we know // that full_waves == total_waves - 1 in this case, the number of data-parallel // waves is simply full_waves-1 (unless full_waves == 0). uint32_t dp_waves = full_waves > 1 ? full_waves - 1 : 0; uint64_t dp_tiles = dp_waves * ctas_per_wave; uint64_t sk_tiles = output_tiles - dp_tiles; if (decomposition_mode == DecompositionMode::Heuristic) { if (full_waves == total_waves || k_tiles_per_output_tile <= min_iters_per_sk_unit_) { // All tiles will be data-parallel tiles if there is either no quantization // or if there is no work to be split. return 0; } // // The final wave is not full. Perform some stream-K work. // // Rudimentary heuristic: prefer data-parallel decomposition if we have more than // one wave and the tail wave is more than half full. This is subject to change. uint64_t tail_tiles = output_tiles - (full_waves * ctas_per_wave); if (2 * tail_tiles >= ctas_per_wave) { return 0; } } return static_cast<uint32_t>(sk_tiles); } CUTLASS_HOST_DEVICE static uint64_t get_num_sk_units(GemmCoord cluster_shape, uint64_t ctas_per_sk_wave, uint32_t sk_tiles, uint32_t k_tiles_per_output_tile) { // If there are stream-K tiles to compute and a sufficiently large number of k iterations // across them, they will be covered by a single wave of persistent threadblocks. Thus, there // will be as many work units as there are threadblocks in a single wave. 
// // When the total k iterations across stream-K tiles is too small to justify distributing // across an entire wave of blocks, we instead distribute the iterations over a smaller // set of blocks. // Number of k iterations computed by the stream-K units as a whole uint64_t k_tiles_sk_total = k_tiles_per_output_tile * sk_tiles; // Calculate the number of stream-K units that would be needed if each stream-K unit // computed the minimum allowable k iterations. Truncate this to be in units of clusters. auto cluster_size = cluster_shape.m() * cluster_shape.n(); uint64_t min_sized_sk_units = (k_tiles_sk_total / min_iters_per_sk_unit_); min_sized_sk_units = (min_sized_sk_units / cluster_size) * cluster_size; uint64_t sk_units = platform::min(ctas_per_sk_wave, min_sized_sk_units); return sk_units; } // Calculates the size of the workspace needed for holding reduction barriers CUTLASS_HOST_DEVICE static int get_barrier_workspace_size(uint64_t num_tiles, uint32_t mma_warp_groups, uint32_t barrier_bits) { auto workspace_bits = num_tiles * mma_warp_groups * barrier_bits; return round_up_to_l2_alignment(bits_to_bytes(static_cast<int>(workspace_bits))); } // Calculates the size of the workspace needed for holding partial outputs from splits CUTLASS_HOST_DEVICE static int get_reduction_workspace_size(uint64_t num_tiles, GemmCoord tile_shape, uint32_t accumulator_bits, uint32_t num_accumulator_mtxs = 1) { auto output_tile_size = tile_shape.m() * tile_shape.n(); auto workspace_bits = accumulator_bits * output_tile_size * num_tiles * num_accumulator_mtxs; return round_up_to_l2_alignment(bits_to_bytes(static_cast<int>(workspace_bits))); } #if !defined(__CUDACC_RTC__) static void get_workspace_component_sizes( dim3 problem_blocks, uint32_t k_tiles_per_output_tile, GemmCoord tile_shape, GemmCoord cluster_shape, int& barrier_workspace_size, int& reduction_workspace_size, KernelHardwareInfo const& hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, DecompositionMode decomposition_mode, uint32_t mma_warp_groups, uint32_t barrier_bits, uint32_t accumulator_bits, uint32_t epilogue_subtile = 1, uint32_t num_accumulator_mtxs = 1) { auto log_swizzle_size = UnderlyingParams::get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle); problem_blocks.x = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); problem_blocks.y = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); // Workspace is needed only for output tiles that will be split. Thus, we first determine the number // of output tiles that will be split, and then calculate the workspace needed to cover these.
uint64_t output_tiles = problem_blocks.x * problem_blocks.y * problem_blocks.z; if (decomposition_mode == DecompositionMode::DataParallel) { barrier_workspace_size = 0; reduction_workspace_size = 0; } else if (decomposition_mode == DecompositionMode::SplitK || (decomposition_mode == DecompositionMode::Heuristic && splits > 1)) { // Basic split-K variant requires workspace for all output tiles barrier_workspace_size = get_barrier_workspace_size(output_tiles, mma_warp_groups, barrier_bits); reduction_workspace_size = get_reduction_workspace_size(output_tiles, tile_shape, accumulator_bits, num_accumulator_mtxs); } else { KernelHardwareInfo new_hw_info; new_hw_info.device_id = hw_info.device_id; new_hw_info.sm_count = hw_info.sm_count; if (new_hw_info.sm_count <= 0) { CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n" " For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count."); new_hw_info.sm_count = KernelHardwareInfo::query_device_multiprocessor_count(new_hw_info.device_id); } dim3 grid = get_grid_shape( problem_blocks, cluster_shape, new_hw_info, max_swizzle, raster_order_option ); uint64_t ctas_per_wave = grid.x * grid.y; uint64_t cluster_size = cluster_shape.m() * cluster_shape.n(); uint32_t sk_tiles = get_num_sk_tiles( output_tiles, ctas_per_wave, cluster_size, static_cast<uint32_t>(k_tiles_per_output_tile), decomposition_mode ); uint64_t ctas_per_sk_wave = ctas_per_wave; uint64_t sk_units = get_num_sk_units(cluster_shape, ctas_per_sk_wave, sk_tiles, k_tiles_per_output_tile); uint64_t dp_tiles = output_tiles - sk_tiles; uint64_t reduction_tiles = sk_tiles; if (should_perform_separate_reduction(epilogue_subtile, sk_units, sk_tiles, dp_tiles, ctas_per_wave)) { // In separate reduction, each peer writes to its own location in scratch space. // Thus, for separate reduction, we need as many reduction tiles per output tile // as there are the maximum number of peers that can collaborate on an output tile. reduction_tiles *= max_peers_per_tile(sk_units, sk_tiles); } // Though separate reduction requires a larger reduction workspace, only one barrier // is needed per output tile. Each peer will increment the barrier by one once the peer has // written its accumulator to scratch space. The separate reduction unit will only begin // performing the reduction when the barrier has reached the number of peers for the output tile. barrier_workspace_size = get_barrier_workspace_size(sk_tiles, mma_warp_groups, barrier_bits); reduction_workspace_size = get_reduction_workspace_size(reduction_tiles, tile_shape, accumulator_bits, num_accumulator_mtxs); } } #endif // !defined(__CUDACC_RTC__) // Returns whether the kernel is configured in a manner for which separate reduction should be used CUTLASS_HOST_DEVICE static bool should_perform_separate_reduction(uint32_t epilogue_subtile, uint64_t sk_units, uint64_t sk_tiles, uint64_t dp_tiles, uint64_t ctas_per_wave) { // We perform separate reduction if we have fewer than one wave of output tiles // and each output tile is covered by at least to stream-K units. When sk_units is // multiple of sk_tiles, will choose basic split-k path instead of separate reduction for now. return (epilogue_subtile != 1) && (dp_tiles == 0) && (sk_units > 2u * sk_tiles) && (sk_units + sk_tiles * epilogue_subtile <= ctas_per_wave); } // Get the amount of scratch workspace needed for the kernel. This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1. 
static size_t get_workspace_size( BatchedGemmCoord problem_shape, GemmCoord tile_shape, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, DecompositionMode decomposition_mode, uint32_t mma_warp_groups, uint32_t barrier_bits, uint32_t element_accumulator_bits, uint32_t epilogue_subtile, uint32_t num_accumulator_mtxs) { dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl(problem_shape, tile_shape, cluster_shape); uint32_t k_tiles_per_output_tile = (problem_shape.k() + tile_shape.k() - 1) / tile_shape.k(); return get_workspace_size( problem_blocks, k_tiles_per_output_tile, tile_shape, cluster_shape, hw_info, splits, max_swizzle, raster_order_option, decomposition_mode, mma_warp_groups, barrier_bits, element_accumulator_bits, epilogue_subtile, num_accumulator_mtxs ); } // Version of get_workspace_size that takes in as input the number of CTAs in the M and N dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. static size_t get_workspace_size( dim3 problem_blocks, uint32_t k_tiles_per_output_tile, GemmCoord tile_shape, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, DecompositionMode decomposition_mode, uint32_t mma_warp_groups, uint32_t barrier_bits, uint32_t element_accumulator_bits, uint32_t epilogue_subtile = 1, uint32_t num_accumulator_mtxs = 1) { int barrier_workspace_size = 0; int reduction_workspace_size = 0; #if !defined(__CUDACC_RTC__) get_workspace_component_sizes( problem_blocks, k_tiles_per_output_tile, tile_shape, cluster_shape, barrier_workspace_size, reduction_workspace_size, hw_info, splits, max_swizzle, raster_order_option, decomposition_mode, mma_warp_groups, barrier_bits, element_accumulator_bits, epilogue_subtile, num_accumulator_mtxs ); #endif return barrier_workspace_size + reduction_workspace_size; } // Initialize the workspace to be used for the kernel. This variant of the method should only be used when // problem_shape and tile_shape contain modes of only rank 1. static cutlass::Status initialize_workspace( void* workspace, cudaStream_t stream, BatchedGemmCoord problem_shape, GemmCoord tile_shape, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, DecompositionMode decomposition_mode, uint32_t mma_warp_groups, uint32_t barrier_bits, uint32_t element_accumulator_bits, uint32_t epilogue_subtile) { dim3 problem_blocks = UnderlyingParams::get_tiled_cta_shape_mnl(problem_shape, tile_shape, cluster_shape); uint32_t k_tiles_per_output_tile = (problem_shape.k() + tile_shape.k() - 1) / tile_shape.k(); return initialize_workspace( workspace, stream, problem_blocks, k_tiles_per_output_tile, tile_shape, cluster_shape, hw_info, splits, max_swizzle, raster_order_option, decomposition_mode, mma_warp_groups, barrier_bits, element_accumulator_bits, epilogue_subtile ); } // Version of initialize_workspace that takes in as input the number of CTAs in the M and N dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. 
static cutlass::Status initialize_workspace( void* workspace, cudaStream_t stream, dim3 problem_blocks, uint32_t k_tiles_per_output_tile, GemmCoord tile_shape, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int splits, int max_swizzle, RasterOrderOptions raster_order_option, DecompositionMode decomposition_mode, uint32_t mma_warp_groups, uint32_t barrier_bits, uint32_t element_accumulator_bits, uint32_t epilogue_subtile = 1, uint32_t num_accumulator_mtxs = 1) { #if !defined(__CUDACC_RTC__) int barrier_workspace_size = 0; int reduction_workspace_size = 0; get_workspace_component_sizes( problem_blocks, k_tiles_per_output_tile, tile_shape, cluster_shape, barrier_workspace_size, reduction_workspace_size, hw_info, splits, max_swizzle, raster_order_option, decomposition_mode, mma_warp_groups, barrier_bits, element_accumulator_bits, epilogue_subtile, num_accumulator_mtxs ); if (barrier_workspace_size > 0) { if (workspace == nullptr) { return Status::kErrorWorkspaceNull; } // Only the barrier workspace needs to be cleared for stream-K. // Barrier workspace follows reduction workspace. uint8_t* barrier_workspace = reinterpret_cast<uint8_t*>(workspace) + reduction_workspace_size; return zero_workspace(static_cast<void*>(barrier_workspace), barrier_workspace_size, stream); } #endif // !defined(__CUDACC_RTC__) return Status::kSuccess; } void set_params_basic( UnderlyingParams const& underlying_params, uint32_t blocks_m, uint32_t blocks_n, uint32_t blocks_l, uint32_t splits, uint32_t k_tiles_per_output_tile, void* reduction_workspace, ReductionMode reduction_mode) { divmod_cluster_shape_major_ = underlying_params.divmod_cluster_shape_major_; divmod_cluster_shape_minor_ = underlying_params.divmod_cluster_shape_minor_; divmod_batch_ = FastDivmodU64(blocks_m * blocks_n); divmod_tiles_per_output_tile_ = FastDivmod(k_tiles_per_output_tile); divmod_sk_groups_ = FastDivmodU64(1u); auto cluster_size = underlying_params.divmod_cluster_shape_major_.divisor * underlying_params.divmod_cluster_shape_minor_.divisor; divmod_clusters_mnl_ = FastDivmodU64((blocks_m * blocks_n * blocks_l) / cluster_size); splits_ = splits; divmod_cluster_blk_major_ = underlying_params.divmod_cluster_blk_major_; log_swizzle_size_ = underlying_params.log_swizzle_size_; units_per_problem_ = blocks_m * blocks_n * blocks_l; raster_order_ = underlying_params.raster_order_; big_units_ = k_tiles_per_output_tile % splits; reduction_workspace_ = reduction_workspace; reduction_mode_ = reduction_mode; k_tiles_per_sk_unit_ = k_tiles_per_output_tile / splits; // No stream-K work is performed for "basic" data-parallel and split-K decompositions sk_tiles_ = 0; sk_units_ = 0; divmod_sk_units_per_group_ = FastDivmodU64(1u); separate_reduction_units_ = 0; } private: // Round up number of bytes to the nearest multiple of L2 cache line alignment CUTLASS_HOST_DEVICE static int round_up_to_l2_alignment(int bytes) { constexpr static uint32_t L2CacheLineSizeBytes = 128; return (bytes + L2CacheLineSizeBytes - 1) / L2CacheLineSizeBytes * L2CacheLineSizeBytes; } }; //////////////////////////////////////////////////////////////////////////////// // Parameters for SM90 persistent group scheduler (only used for Grouped Gemms) template<class ProblemShape> struct PersistentTileSchedulerSm90GroupParams { enum class RasterOrder { AlongM, AlongN }; enum class RasterOrderOptions { Heuristic, AlongM, AlongN }; FastDivmodU64Pow2 divmod_cluster_shape_major_{}; FastDivmodU64Pow2 divmod_cluster_shape_minor_{}; FastDivmodU64 divmod_cta_shape_m_{}; FastDivmodU64 
divmod_cta_shape_n_{}; uint64_t blocks_across_problem_ = 0; bool pre_processed_problem_shapes = true; int32_t log_swizzle_size_ = 0; RasterOrder raster_order_ = RasterOrder::AlongN; int32_t groups_ = 0; ProblemShape* problem_shapes_ = nullptr; GemmCoord cta_shape_; GemmCoord cluster_shape_; // Version of initialize that takes in as input the number of CTAs in the M and N and L dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. void initialize( dim3 problem_blocks, int32_t groups, ProblemShape* problem_shapes, ProblemShape const* host_problem_shapes, GemmCoord cta_shape, GemmCoord cluster_shape, KernelHardwareInfo const& hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option ) { CUTLASS_UNUSED(hw_info); // Round up to nearest multiple of swizzle_size along each mode auto log_swizzle_size = get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle_size); auto problem_blocks_m = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); auto problem_blocks_n = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); RasterOrder raster_order = get_rasterization_order( problem_blocks_m, problem_blocks_n, raster_order_option ); // // Set members // groups_ = groups; problem_shapes_ = problem_shapes; cta_shape_ = cta_shape; cluster_shape_ = cluster_shape; blocks_across_problem_ = problem_blocks.x * problem_blocks.y * problem_blocks.z; pre_processed_problem_shapes = (host_problem_shapes == nullptr) ? false : true; log_swizzle_size_ = log_swizzle_size; raster_order_ = raster_order; if (raster_order == RasterOrder::AlongN) { divmod_cluster_shape_major_ = FastDivmodU64Pow2(cluster_shape.n()); divmod_cluster_shape_minor_ = FastDivmodU64Pow2(cluster_shape.m()); } else { divmod_cluster_shape_major_ = FastDivmodU64Pow2(cluster_shape.m()); divmod_cluster_shape_minor_ = FastDivmodU64Pow2(cluster_shape.n()); } divmod_cta_shape_m_ = FastDivmodU64(cta_shape_.m()); divmod_cta_shape_n_ = FastDivmodU64(cta_shape_.n()); } // Version of get_tiled_cta_shape_mnl that takes in as input the number of CTAs in the M and N dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. CUTLASS_HOST_DEVICE static dim3 get_tiled_cta_shape_mnl(GemmCoord cluster_shape, uint32_t cta_m, uint32_t cta_n) { // Round up to nearest multiple of cluster dim along each mode auto problem_blocks_m = ((cta_m + cluster_shape.m() - 1) / cluster_shape.m()) * cluster_shape.m(); auto problem_blocks_n = ((cta_n + cluster_shape.n() - 1) / cluster_shape.n()) * cluster_shape.n(); return { static_cast<uint32_t>(cta_m), static_cast<uint32_t>(cta_n), static_cast<uint32_t>(1) // Only a single batch per group is currently supported }; } // Version of get_grid_shape that takes in as input the number of CTAs in the M and N and L dimensions. // This is useful for calculating the tiled shape when a mode of problem and/or CTA shape has rank > 1, // for which using CuTe algebra for calculating tile shapes is easiest. 
CUTLASS_HOST_DEVICE static dim3 get_grid_shape( dim3 problem_blocks, GemmCoord cluster_shape, KernelHardwareInfo hw_info, int max_swizzle_size, RasterOrderOptions raster_order_option, bool truncate_by_problem_size=true) { int const sm_count = hw_info.sm_count; // Round up to nearest multiple of swizzle_size along each mode auto log_swizzle_size = get_log_swizzle_size(problem_blocks.x, problem_blocks.y, max_swizzle_size); auto problem_blocks_m = round_up(problem_blocks.x, (1 << log_swizzle_size) * cluster_shape.m()); auto problem_blocks_n = round_up(problem_blocks.y, (1 << log_swizzle_size) * cluster_shape.n()); int problem_blocks_total = problem_blocks_m * problem_blocks_n * problem_blocks.z; RasterOrder raster_order = get_rasterization_order( problem_blocks_m, problem_blocks_n, raster_order_option ); dim3 launch_grid; if (raster_order == RasterOrder::AlongN) { launch_grid = dim3(cluster_shape.m(), 1, 1); } else { launch_grid = dim3(1, cluster_shape.n(), 1); } auto possibly_truncate = [&](int x, int y) { if (truncate_by_problem_size) { return platform::min(x, y); } else { return x; } }; // The else path is generic, however, we can avoid some divs if we know cluster size is 1 auto cluster_size = cluster_shape.m() * cluster_shape.n(); if (cluster_size == 1) { if (raster_order == RasterOrder::AlongN) { launch_grid.y = possibly_truncate(sm_count, problem_blocks_total); } else { launch_grid.x = possibly_truncate(sm_count, problem_blocks_total); } } else { // Optimal grid size calculation is based on // GH100: 8 GPCs, 72 TPCs (9 TPCs/GPC), 2 SMs/TPC, 144 SMs per full GPU // Hence, maximum SMs per GPC = 18 constexpr int max_sm_per_gpc = 18; // Provided SM count could possibly be less than the assumed maximum SMs per GPC auto cluster_size = cluster_shape.m() * cluster_shape.n(); int const min_num_gpc = sm_count < max_sm_per_gpc ? 1 : sm_count / max_sm_per_gpc; int const max_cta_occupancy_per_gpc = max_sm_per_gpc - (max_sm_per_gpc % cluster_size); int cta_per_device = min_num_gpc * max_cta_occupancy_per_gpc; // The calculation below allows for larger grid size launch for different GPUs. int const num_gpc_residual = sm_count < max_sm_per_gpc ? 0 : sm_count % max_sm_per_gpc; int const max_cta_occupancy_per_residual_gpc = num_gpc_residual - (num_gpc_residual % cluster_size); cta_per_device += max_cta_occupancy_per_residual_gpc; cta_per_device = sm_count < cta_per_device ? 
sm_count : cta_per_device; if (raster_order == RasterOrder::AlongN) { launch_grid.y = possibly_truncate( cta_per_device / cluster_shape.m(), problem_blocks_total / cluster_shape.m()); } else { launch_grid.x = possibly_truncate( cta_per_device / cluster_shape.n(), problem_blocks_total / cluster_shape.n()); } } return launch_grid; } CUTLASS_HOST_DEVICE static int32_t get_log_swizzle_size(int problem_ctas_m, int problem_ctas_n, int max_swizzle_size) { int min_cta_dim = platform::min(problem_ctas_m, problem_ctas_n); if (max_swizzle_size >= 8 && min_cta_dim >= 6) { return 3; } else if (max_swizzle_size >= 4 && min_cta_dim >= 3) { return 2; } else if (max_swizzle_size >= 2 && min_cta_dim >= 2) { return 1; } else { return 0; } } CUTLASS_HOST_DEVICE static RasterOrder get_rasterization_order( uint32_t tiles_m, uint32_t tiles_n, RasterOrderOptions raster_order_option ) { if (raster_order_option == RasterOrderOptions::Heuristic) { if (tiles_n > tiles_m) { return RasterOrder::AlongM; } else { return RasterOrder::AlongN; } } else { switch (raster_order_option) { case RasterOrderOptions::AlongN: return RasterOrder::AlongN; break; default: return RasterOrder::AlongM; } } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace detail } // namespace kernel } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
include/cutlass/gemm/kernel/tile_scheduler_params.h/0
{ "file_path": "include/cutlass/gemm/kernel/tile_scheduler_params.h", "repo_id": "include", "token_count": 21776 }
34
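A minimal standalone sketch, not part of CUTLASS itself, of the split arithmetic that set_params_basic in the scheduler above encodes: each of `splits` units covers k_tiles_per_output_tile / splits K tiles, and the remaining k_tiles_per_output_tile % splits "big" units cover one extra K tile so that every K iteration is assigned to some unit. The example values, the printing, and the assumption that the lowest-indexed units take the extra tile are illustrative only, not guarantees about the scheduler's exact unit ordering.

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t k_tiles_per_output_tile = 17;  // K iterations needed for one output tile
  uint32_t splits = 4;                    // units cooperating on that output tile

  uint32_t k_tiles_per_sk_unit = k_tiles_per_output_tile / splits;  // base share: 4
  uint32_t big_units = k_tiles_per_output_tile % splits;            // 1 unit takes an extra K tile

  uint32_t total = 0;
  for (uint32_t unit = 0; unit < splits; ++unit) {
    // Illustrative assumption: the first `big_units` units process the extra K tile.
    uint32_t k_tiles = k_tiles_per_sk_unit + (unit < big_units ? 1u : 0u);
    std::printf("unit %u covers %u K tiles\n", unit, k_tiles);
    total += k_tiles;
  }
  std::printf("total K tiles covered: %u (expected %u)\n", total, k_tiles_per_output_tile);
  return 0;
}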
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic properties needed by CTA-level GEMMs assuming expectations about data layout of the global memory fragments, data types, and internal tile sizes. Partial specializations for threadblock::Mma operations targeting TensorOp instructions. */ #pragma once #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/layout/tensor_op_multiplicand_sm80.h" #include "cutlass/gemm/warp/default_mma_with_reduction_tensor_op.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" #include "cutlass/gemm/threadblock/default_mma_core.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op_sm80.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_pitch_linear.h" #include "cutlass/gemm/threadblock/mma_with_reduction_multistage.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Template defininng default matrix multiply operators inferred from threadblock tile size, /// global memory data layout, and target math instruction. 
template < /// Shape of threadblock-scoped matrix multiply operator typename Shape_, /// Shape of warp-level matrix multiply operator typename WarpShape, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape, /// Element data type of A operand typename ElementA, /// Layout of operand A typename LayoutA, /// Element data type of B operand typename ElementB, /// Layout of operand B typename LayoutB, /// Data type of accumulator typename ElementC, /// Layout of accumulator typename LayoutC, /// Indicates type of math operator (arch::OpClassSimt or arch::OpClassTensorOp) typename OperatorClass, /// Reduce operand A or B along K dimension bool ReduceKForA_, /// Number of stages int Stages = 2, /// Operation performed by MMA typename Operator = typename platform::conditional< (platform::is_same<OperatorClass, cutlass::arch::OpClassTensorOp>::value) && (platform::is_same<ElementA, int8_t>::value || platform::is_same<ElementA, int4b_t>::value || platform::is_same<ElementA, uint8_t>::value || platform::is_same<ElementA, uint4b_t>::value), cutlass::arch::OpMultiplyAddSaturate, cutlass::arch::OpMultiplyAdd>::type, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor = false, /// Cache operation of operand A cutlass::arch::CacheOperation::Kind CacheOpA = cutlass::arch::CacheOperation::Global, /// Cache operation of operand B cutlass::arch::CacheOperation::Kind CacheOpB = cutlass::arch::CacheOperation::Global, /// per-element transformation for elements of A ComplexTransform TransformA = ComplexTransform::kNone, /// per-element transformation for elements of B ComplexTransform TransformB = ComplexTransform::kNone, bool IsComplex = false// (is_complex<ElementA>::value || is_complex<ElementB>::value) > struct DefaultMmaWithReductionCore { using Base = DefaultMmaCore<Shape_, WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, OperatorClass, Stages, Operator, AccumulatorsInRowMajor, CacheOpA, CacheOpB, TransformA, TransformB, IsComplex>; using Shape = Shape_; using IteratorThreadMapA = typename Base::IteratorThreadMapA; using IteratorThreadMapB = typename Base::IteratorThreadMapB; using SmemIteratorA = typename Base::SmemIteratorA; using SmemIteratorB = typename Base::SmemIteratorB; using SmemLayoutA = typename Base::SmemLayoutA; using SmemLayoutB = typename Base::SmemLayoutB; using WarpCount = typename Base::WarpCount; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; // Define the warp-level tensor op using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaWithReductionTensorOp< WarpShape, InstructionShape, ElementA, SmemLayoutA, ElementB, SmemLayoutB, ElementC, LayoutC, Operator, ReduceKForA_, WarpCount::kK>::Type; /// Policy used to define MmaPipelined using MmaPolicy = MmaPolicy<MmaTensorOp, MatrixShape<0, 0>, MatrixShape<0, 0>, WarpCount::kK>; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass
include/cutlass/gemm/threadblock/default_mma_core_with_reduction.h/0
{ "file_path": "include/cutlass/gemm/threadblock/default_mma_core_with_reduction.h", "repo_id": "include", "token_count": 2646 }
35
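A hypothetical instantiation of the DefaultMmaWithReductionCore template defined above, included only as a usage sketch: the tile shapes, element types, and stage count are illustrative choices, and whether a matching DefaultMmaCore partial specialization exists for them depends on the CUTLASS version and target architecture being compiled for.

#include "cutlass/gemm/threadblock/default_mma_core_with_reduction.h"

using MmaCore = cutlass::gemm::threadblock::DefaultMmaWithReductionCore<
    cutlass::gemm::GemmShape<128, 128, 32>,          // threadblock tile
    cutlass::gemm::GemmShape<64, 64, 32>,            // warp tile
    cutlass::gemm::GemmShape<16, 8, 16>,             // tensor-core instruction shape
    cutlass::half_t, cutlass::layout::RowMajor,      // A element / layout
    cutlass::half_t, cutlass::layout::ColumnMajor,   // B element / layout
    float, cutlass::layout::RowMajor,                // accumulator element / layout
    cutlass::arch::OpClassTensorOp,
    /* ReduceKForA_ = */ true,                       // reduce operand A along K
    /* Stages = */ 3>;                               // multistage pipeline depth

// The reduction-aware warp-level operator and the shared-memory iterators are then
// available as nested types, mirroring the non-reduction DefaultMmaCore:
using WarpMma       = MmaCore::MmaTensorOp;
using SmemIteratorA = MmaCore::SmemIteratorA;
using SmemIteratorB = MmaCore::SmemIteratorB;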
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default warp-level GEMM operators selected by data type, size, and layouts of operands. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/warp/mma_complex_tensor_op.h" #include "cutlass/gemm/warp/mma_complex_tensor_op_fast_f32.h" #include "cutlass/gemm/warp/mma_gaussian_complex_tensor_op.h" #include "cutlass/layout/tensor_op_multiplicand_sm80.h" namespace cutlass { namespace gemm { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Complex transform on A operand ComplexTransform TransformA = ComplexTransform::kNone, /// Complex transform on B operand ComplexTransform TransformB = ComplexTransform::kNone, /// Multiply-add operator (arch::OpMultiplyAddComplex, arch::OpMultiplyGaussianComplex) typename Operator_ = arch::OpMultiplyAddComplex> struct DefaultMmaComplexTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex<T>*complex<T> case // 4 real-valued mma operations // A = (ar + j ai), B (br +j bi), D = AB // D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Real-valued underlying type of complex-valued A operand typename RealElementA, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Real-valued underlying type of complex-valued B operand typename RealElementB, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Real-valued underlying type of complex-valued C operand typename RealElementC, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, InstructionShape_, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddComplex> { using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, RealElementA, cutlass::layout::RowMajor, RealElementB, cutlass::layout::ColumnMajor, RealElementC, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaComplexTensorOp< WarpShape_, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, Policy, TransformA, TransformB>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex<T>*complex<T> case using GaussianComplex operation // 3 real-valued mma operations // A = (ar + j ai), B = (br +j bi), D = AB // P1 = (ar + ai) * br, P2 = - ar * (br - bi), P3 = ai * (br + bi) // D = dr + j di = (P1 - P3) + j (P1 + P2) 
///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Real-valued underlying type of complex-valued A operand typename RealElementA, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Real-valued underlying type of complex-valued B operand typename RealElementB, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Real-valued underlying type of complex-valued C operand typename RealElementC, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, InstructionShape_, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddGaussianComplex> { using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, RealElementA, cutlass::layout::RowMajor, RealElementB, cutlass::layout::ColumnMajor, RealElementC, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaGaussianComplexTensorOp< WarpShape_, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, Policy, TransformA, TransformB>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization - input and output types are complex<float>*complex<float> // Use TF32 tensor operation internally // 4 real-valued mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 operations on TF32 // A = (ar + j ai), B (br +j bi), D = AB // D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, InstructionShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddComplex> { // Complex floating point tensor operation use mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 mma instruction using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, tfloat32_t, cutlass::layout::RowMajor, tfloat32_t, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaComplexTensorOp< WarpShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, Policy, TransformA, TransformB>; }; 
///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization - input and output types are complex<float>*complex<float> // Use BF16 tensor operation internally // 4 real-valued mma.sync.aligned.m16n8k8.f32.bf16.bf16.f32 operations on BF16 // A = (ar + j ai), B (br +j bi), D = AB // D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, InstructionShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddFastBF16> { // Complex floating point tensor operation use mma.sync.aligned.m16n8k8.f32.bf16.bf16.f32 mma instruction using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, bfloat16_t, cutlass::layout::RowMajor, bfloat16_t, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaComplexTensorOp< WarpShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, Policy, TransformA, TransformB>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization - input and output types are complex<float>*complex<float> // Use F16 tensor operation internally // 4 real-valued mma.sync.aligned.m16n8k8.f32.f16.f16.f32 operations on F16 // A = (ar + j ai), B (br +j bi), D = AB // D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, InstructionShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddFastF16> { // Complex floating point tensor operation use mma.sync.aligned.m16n8k8.f32.f16.f16.f32 mma instruction using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, half_t, cutlass::layout::RowMajor, half_t, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaComplexTensorOp< WarpShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, Policy, TransformA, TransformB>; }; 
///////////////////////////////////////////////////////////////////////////////////////////////// /// 3xTF32 or 4xTF32 (fast and accurate complex<float> operation) /// Partial specialization - input and output types are complex<float> * complex<float> // Use 3xTF32 or 4xTF32 tensor operation internally // 4 real-valued mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 operations on TF32 // A = (ar + j ai), B (br +j bi), D = AB // D = dr + j di = 3x[(ar*br - ai*bi) + j (ar*bi + ai*br)] ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, InstructionShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddComplexFastF32> { // Complex floating point tensor operation use mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 mma instruction using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, tfloat32_t, cutlass::layout::RowMajor, tfloat32_t, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaComplexTensorOpFastF32< WarpShape_, complex<float>, LayoutA, complex<float>, LayoutB, complex<float>, LayoutC, Policy, TransformA, TransformB>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex<double>*complex<double> case // 4 real-valued mma.sync.aligned.m16n8k4.f64.f64.f64.f64 operations // A = (ar + j ai), B (br +j bi), D = AB // D = dr + j di = (ar*br - ai*bi) + j (ar*bi + ai*br) ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Real-valued underlying type of complex-valued A operand typename RealElementA, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Real-valued underlying type of complex-valued B operand typename RealElementB, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Real-valued underlying type of complex-valued C operand typename RealElementC, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, GemmShape<16, 8, 4>, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddComplex> { using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< GemmShape<16, 8, 4>, 32, RealElementA, cutlass::layout::RowMajor, RealElementB, cutlass::layout::ColumnMajor, RealElementC, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, 
cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaComplexTensorOp< WarpShape_, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, Policy, TransformA, TransformB, true>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex<T>*complex<T> case using GaussianComplex operation // 3 real-valued mma.sync.aligned.m16n8k4.f64.f64.f64.f64 operations // A = (ar + j ai), B = (br +j bi), D = AB // P1 = (ar + ai) * br, P2 = - ar * (br - bi), P3 = ai * (br + bi) // D = dr + j di = (P1 - P3) + j (P1 + P2) ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename WarpShape_, /// Real-valued underlying type of complex-valued A operand typename RealElementA, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Real-valued underlying type of complex-valued B operand typename RealElementB, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Real-valued underlying type of complex-valued C operand typename RealElementC, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Complex transform on A operand ComplexTransform TransformA, /// Complex transform on B operand ComplexTransform TransformB> struct DefaultMmaComplexTensorOp< WarpShape_, GemmShape<16, 8, 4>, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, TransformA, TransformB, arch::OpMultiplyAddGaussianComplex> { using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< GemmShape<16, 8, 4>, 32, RealElementA, cutlass::layout::RowMajor, RealElementB, cutlass::layout::ColumnMajor, RealElementC, cutlass::layout::RowMajor, arch::OpMultiplyAdd>, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaGaussianComplexTensorOp< WarpShape_, complex<RealElementA>, LayoutA, complex<RealElementB>, LayoutB, complex<RealElementC>, LayoutC, Policy, TransformA, TransformB, true>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass
include/cutlass/gemm/warp/default_mma_complex_tensor_op.h/0
{ "file_path": "include/cutlass/gemm/warp/default_mma_complex_tensor_op.h", "repo_id": "include", "token_count": 7122 }
36
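The comment blocks in the header above describe two ways of forming the complex product D = A * B from real-valued tensor-core MMAs: the direct form with four real multiplies (MmaComplexTensorOp) and the Gaussian form with three (MmaGaussianComplexTensorOp). A small scalar sketch, independent of CUTLASS and using arbitrary example values, checks that the two forms agree:

#include <cstdio>

int main() {
  // A = ar + j*ai, B = br + j*bi
  double ar = 1.5, ai = -2.0, br = 0.5, bi = 3.0;

  // Direct form: four real multiplies.
  double dr4 = ar * br - ai * bi;
  double di4 = ar * bi + ai * br;

  // Gaussian form: three real multiplies.
  double p1 = (ar + ai) * br;
  double p2 = -ar * (br - bi);
  double p3 = ai * (br + bi);
  double dr3 = p1 - p3;   // equals ar*br - ai*bi
  double di3 = p1 + p2;   // equals ar*bi + ai*br

  std::printf("direct:   %g + j%g\n", dr4, di4);
  std::printf("gaussian: %g + j%g\n", dr3, di3);
  return 0;
}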
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines iterators used by warp-level loading scale and bias vectors. Every scale/bias data only needs to be loaded once for every channel. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_ref.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/platform/platform.h" #include "cutlass/fast_math.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { //////////////////////////////////////////////////////////////////////////////// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of A elements typename Element_, /// Layout of operand typename Layout_, /// Shape of one matrix production operation (concept: GemmShape) typename InstructionShape_, /// Policy of the details of LDSM shape and iterations typename Policy_, /// Number of threads participating in one matrix operation int Threads, /// Number of partitions along K dimension int PartitionsK_ = 1> class ScaleBiasTileIterator; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to /// load from shared memory and therefore must be initialized with a TensorRef /// to shared memory. 
/// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: PitchLinearShape) typename Shape_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: PitchLinearShape) typename InstructionShape_, /// Policy of the details of LDSM shape and iterations typename Policy_, /// Number of partitions along K dimension int PartitionsK_> class ScaleBiasTileIterator<Shape_, Element_, cutlass::layout::PitchLinear, InstructionShape_, Policy_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::PitchLinear; /// Shape of one matrix product operation (concept: GemmShape) using InstructionShape = InstructionShape_; /// Number of participating threads static int const kThreads = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; /// Number of partitions along K dimension static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection using Policy = Policy_; private: /// Pointer type used for accesses using AccessType = Array<Element, kElementsPerAccess>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = Array<Element, 2 * Policy::kLdsmOpInner * InstructionShape::kContiguous / kThreads>; private: /// Shared memory base pointers - not advanced AccessType const *pointer_; /// Byte offset incremented as iterator advances Index byte_offset_; /// Internal counter used to determine when to increment byte offset and when /// to XOR it int k_group_idx_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE ScaleBiasTileIterator() : pointer_(nullptr), byte_offset_(0), k_group_idx_(0) {} /// Constructor from TensorRef CUTLASS_DEVICE ScaleBiasTileIterator(TensorRef const &ref_scale_bias, int lane_id) : byte_offset_(0), k_group_idx_(0) { /// 16816 only pointer_ = reinterpret_cast<AccessType const *>(ref_scale_bias.data()) + ((lane_id >> 3) & 1) * Shape::kContiguous / kElementsPerAccess + (lane_id >> 4); } /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_DEVICE ScaleBiasTileIterator &add_pointer_offset(LongIndex offset) { byte_offset_ += offset * sizeof_bits<Element>::value / 8; return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_DEVICE ScaleBiasTileIterator &add_tile_offset( TensorCoord const &tile_offset) { int whole_tiles = tile_offset.contiguous() / Policy::kGroupsPerTile; int k_groups_delta = tile_offset.contiguous() % Policy::kGroupsPerTile; byte_offset_ += k_groups_delta * sizeof_bits<Element>::value * kElementsPerAccess * Policy::LdsmShape::kContiguous / 8; // Multiply by 2 because scale and bias belonging to the same stage are next // to each other in the shared memory. 
pointer_ += (2 * whole_tiles * Shape::kContiguous / kElementsPerAccess); return *this; } /// Advances the iterator along the advance dimension CUTLASS_DEVICE ScaleBiasTileIterator &operator++() { byte_offset_ += Policy::LdsmShape::kContiguous * sizeof_bits<Element>::value * kElementsPerAccess / 8; k_group_idx_++; if (k_group_idx_ == (Policy::kGroupsPerTile / kPartitionsK)) { k_group_idx_ = 0; byte_offset_ -= (Policy::kGroupsPerTile / kPartitionsK) * Policy::LdsmShape::kContiguous * sizeof_bits<Element>::value * kElementsPerAccess / 8; add_tile_offset({Policy::kGroupsPerTile, 0}); } return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE ScaleBiasTileIterator &operator--() { assert(0); } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE ScaleBiasTileIterator &operator+=( TensorCoord const &tile_offset) { add_tile_offset(tile_offset); return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE ScaleBiasTileIterator &operator-=( TensorCoord const &tile_offset) { add_tile_offset(-tile_offset); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { load_with_byte_offset(frag, 0); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset in units of bytes Index byte_offset) const { Array<unsigned, 4> *fetch_ptr = reinterpret_cast<Array<unsigned, 4> *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < 1; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) { int access_idx = c + s * Policy::LdsmIterations::kContiguous; AccessType const *source_ptr = pointer_ + Policy::LdsmShape::kContiguous * c; char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_; cutlass::arch::ldsm<layout::RowMajor, 4>( fetch_ptr[access_idx], source_byte_ptr); } } } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { load_with_byte_offset(frag, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { load_with_byte_offset(frag, tile_offset, 0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element)); } /// Loads a fragment from memory with logical offset in units of whole tiles. 
CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { Index pointer_offset = tile_offset.contiguous() * InstructionShape::kContiguous / kElementsPerAccess; byte_offset += sizeof_bits<AccessType>::value * pointer_offset / 8; load_with_byte_offset(frag, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. CUTLASS_DEVICE void set_kgroup_index(int k_group) { k_group_idx_ = k_group % (Policy::kGroupsPerTile / kPartitionsK); } }; //////////////////////////////////////////////////////////////////////////////// /// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to /// load from shared memory and therefore must be initialized with a TensorRef /// to shared memory. /// /// Satisfies: /// ReadableRandomAccessContiguousTileIteratorConcept /// template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Data type of elements typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Policy of the details of LDSM shape and iterations typename Policy_, /// Number of partitions along K dimension int PartitionsK_> class ScaleBiasTileIterator<Shape_, Element_, cutlass::layout::RowMajor, InstructionShape_, Policy_, 32, PartitionsK_> { public: /// Shape of tile to load (concept: PitchLinearShape) using Shape = Shape_; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::RowMajor; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Number of participating threads static int const kThreads = 32; /// TensorRef type for loading element from a tensor using TensorRef = TensorRef<Element, Layout>; /// Index type using Index = typename TensorRef::Index; /// Long Index type using LongIndex = typename TensorRef::LongIndex; /// Coordinate for an element in the tensor using TensorCoord = typename TensorRef::TensorCoord; /// Internal structure of iterator - made public to enable introspection using Policy = Policy_; /// Underlying tile iterator implementation using Base = ScaleBiasTileIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, layout::PitchLinearShape<InstructionShape::kColumn, InstructionShape::kRow>, Policy, kThreads, PartitionsK_>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile using Fragment = typename Base::Fragment; private: /// Underlying tile iterator Base iterator_; public: /// Default ctor constructs null iterator CUTLASS_HOST_DEVICE ScaleBiasTileIterator() {} /// Constructor from TensorRef CUTLASS_HOST_DEVICE ScaleBiasTileIterator(TensorRef const &ref_scale_bias, int lane_id) : iterator_({ref_scale_bias.data(), ref_scale_bias.stride()}, lane_id) {} /// Adds a pointer offset to internal pointer(s) to advance through memory CUTLASS_HOST_DEVICE ScaleBiasTileIterator &add_pointer_offset(LongIndex offset) { iterator_.add_pointer_offset(offset); return *this; } /// Advances an iterator along logical 
dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE ScaleBiasTileIterator &add_tile_offset( TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); return *this; } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_DEVICE ScaleBiasTileIterator &add_tile_offset_negative( TensorCoord const &tile_offset) { iterator_.add_tile_offset_negative({tile_offset.column(), tile_offset.row()}); return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE ScaleBiasTileIterator &operator++() { ++iterator_; return *this; } /// Advances the iterator along the advance dimension CUTLASS_HOST_DEVICE ScaleBiasTileIterator &operator--() { --iterator_; return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE ScaleBiasTileIterator &operator+=( TensorCoord const &tile_offset) { add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } ///< advances in units of whole tiles along the logical coordinate space of ///< the tensor CUTLASS_DEVICE ScaleBiasTileIterator &operator-=( TensorCoord const &tile_offset) { add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row())); return *this; } /// Loads a fragment from memory at the location pointed to by the iterator. CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_pointer_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index pointer_offset) const { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory with additional logical offset CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a linear offset Index byte_offset) const { iterator_.load_with_byte_offset(frag, byte_offset); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset) const { assert(0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index pointer_offset) const { assert(0); } /// Loads a fragment from memory with logical offset in units of whole tiles. CUTLASS_DEVICE void load_with_byte_offset( /// fragment to load from the tensor Fragment &frag, /// loads a tile with a logical offset in units of whole tiles TensorCoord const &tile_offset, /// loads a tile with a logical offset AND a pointer offset Index byte_offset) const { iterator_.load_with_byte_offset( frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset); } /// Notify the iterator which k-group it is currently pointing to. /// /// This does not advance the iterator. Rather, it overrides its internal /// tracking with constant-valued k-group index to enable the compiler to /// fold constants and achieve more efficient code. /// /// This is used by some nontrivial permuted layouts. 
CUTLASS_DEVICE void set_kgroup_index(int k_group) { iterator_.set_kgroup_index(k_group); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
include/cutlass/gemm/warp/scale_bias_tile_iterator.h/0
{ "file_path": "include/cutlass/gemm/warp/scale_bias_tile_iterator.h", "repo_id": "include", "token_count": 6222 }
37
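The warp-level scale/bias iterator above is driven entirely by 128-bit shared-memory accesses and byte-offset arithmetic. The standalone sketch below (not CUTLASS code) reproduces that arithmetic for a 16-bit element type, the case the "16816 only" constructor comment refers to; the LdsmShape::kContiguous value of 4 is an illustrative assumption, since the real value comes from the Policy template parameter.

// Standalone sketch: reproduces the access-width math used by the iterator above.
#include <cstdio>

int main() {
  int const element_bits = 16;                  // sizeof_bits<Element>::value for a half-precision element
  int const access_bits  = 128;                 // one 128b shared-memory (LDSM) access
  int const elements_per_access = access_bits / element_bits;   // kElementsPerAccess == 8

  // operator++ advances byte_offset_ by LdsmShape::kContiguous accesses worth of bytes.
  int const ldsm_shape_contiguous = 4;          // illustrative assumption; supplied by Policy in CUTLASS
  int const byte_advance = ldsm_shape_contiguous * element_bits * elements_per_access / 8;

  std::printf("elements per 128b access: %d, byte advance per ++: %d\n",
              elements_per_access, byte_advance);
  return 0;
}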
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/coord.h" #include "cutlass/matrix_coord.h" #include "cutlass/layout/pitch_linear.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace layout { //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). /// This one is the base class of all Ampere/Turing fp16/bf16/int8/int4/int1 /// tensor core kernels. tf32 TN uses this too. template <int ElementSize, int Crosswise> struct TensorOpMultiplicand { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Static constants // /// This layout is optimized for 128b accesses static int const kAccessSize = 128; static int const kElementSize = ElementSize; static int const kElementsPerAccess = kAccessSize / kElementSize; static int const kCrosswise = Crosswise; /// Contiguous dimension of the tile shape matches one shared memory cache /// line - 128B. For 128bit access size, it equals to 8 accesses. 
static int const kTileShapeContiguous = 128 / (kAccessSize / 8); /// Number of kblocks to store PartitionShape::kContiguous Elements static int const kFactor = kTileShapeContiguous * kElementsPerAccess / kCrosswise; static_assert( (kFactor > 0), "kCrosswise should be no larger than one shared memory cache line."); /// The strided dimension needs to be at least (WarpSize(32) / /// kTileShapeContiguous) for a warp to access. To ensure conflict free /// access, it also needs to be at least (kTileShapeContiguous / kFactor). /// See comments below static int const kTileShapeStride = ((kTileShapeContiguous / kFactor) > (32 / kTileShapeContiguous)) ? (kTileShapeContiguous / kFactor) : (32 / kTileShapeContiguous); /// Fundamental tile shape in units of vectors to guarantee bank conflict free /// shared memory load/store. /// For kFactor = 1, TileShape = <8, 8> /// For kFactor > 1, TileShape = <8, 4> using TileShape = PitchLinearShape<kTileShapeContiguous, kTileShapeStride>; /// Fundamental partition shape in units of vectors using PartitionShape = PitchLinearShape<4, 4>; using PartitionCount = PitchLinearShape<TileShape::kContiguous / PartitionShape::kContiguous, TileShape::kStrided / PartitionShape::kStrided>; using AccessCount = PitchLinearShape<PartitionShape::kContiguous, PartitionShape::kStrided>; private: // // Data members // /// Stride data member. For GEMM, it equals kCrosswise x the number of stages. Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicand(Index ldm = 0) : stride_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicand(Stride stride) : stride_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicand packed(TensorCoord const &extent) { return TensorOpMultiplicand(extent[0]); } /// Returns the offset of a coordinate in linear memory.
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { // // First, compute c and s of vector within source (in units of vector // accesses) // int vec_contiguous_idx = coord.contiguous() / kElementsPerAccess; int vec_strided_idx = coord.strided() / kFactor; // Compute the fundamental tile being accessed int tile_contiguous_idx = vec_contiguous_idx / (TileShape::kContiguous / kFactor); int tile_contiguous_residual = vec_contiguous_idx % (TileShape::kContiguous / kFactor) + ((coord.strided() % kFactor) * (TileShape::kContiguous / kFactor)); int tile_strided_residual = vec_strided_idx % TileShape::kStrided; // Compute the 'partition' within the fundamental tile int partition_contiguous_idx = tile_contiguous_residual / PartitionShape::kContiguous; int partition_strided_idx = tile_strided_residual / PartitionShape::kStrided; int partition_contiguous_residual = tile_contiguous_residual % PartitionShape::kContiguous; int partition_strided_residual = tile_strided_residual % PartitionShape::kStrided; // // Then swizzle // int permuted_vec_contiguous_within_partition = partition_contiguous_residual ^ (partition_strided_residual % 4); int permuted_partition_contiguous_within_tile = partition_contiguous_idx ^ (partition_strided_idx % 2); // // Compute final element location // int element_contiguous = (tile_contiguous_idx * TileShape::kContiguous + permuted_partition_contiguous_within_tile * PartitionShape::kContiguous + permuted_vec_contiguous_within_partition) * kElementsPerAccess + (coord.contiguous() % kElementsPerAccess); int element_strided = vec_strided_idx; return element_contiguous + element_strided * stride_[0] * kFactor; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return extent[1] * stride_[0]; } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). 
template <int ElementSize, int Crosswise> struct TensorOpMultiplicandCongruous { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicand<ElementSize, Crosswise>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; static int const kCrosswise = Base::kCrosswise; static int const kFactor = Base::kFactor; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCongruous(Index ldm = 0) : layout_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCongruous(Stride stride) : layout_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicandCongruous packed(TensorCoord const &extent) { return TensorOpMultiplicandCongruous(extent[0]); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(coord); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return coord; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(extent); } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). /// This one is just for TF32 NT kernel. 
template <int Crosswise> struct TensorOpMultiplicandCongruous<32, Crosswise> { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // /// This layout is optimized for 128b accesses static int const kAccessSize = 128; /// Fundamental tile shape in units of vectors using TileShape = PitchLinearShape<8, 4>; /// Partitionshape is the same as TileShape for this layout using PartitionShape = PitchLinearShape<8, 4>; using PartitionCount = PitchLinearShape<TileShape::kContiguous / PartitionShape::kContiguous, TileShape::kStrided / PartitionShape::kStrided>; using AccessCount = PitchLinearShape<PartitionShape::kContiguous, PartitionShape::kStrided>; // // Static constants // static int const kElementSize = 32; static int const kElementsPerAccess = kAccessSize / kElementSize; static int const kCrosswise = Crosswise; static int const kFactor = 1; private: // // Data members // /// Stride data member. Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCongruous(Index ldm = 0) : stride_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCongruous(Stride stride) : stride_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicandCongruous packed(TensorCoord const &extent) { return TensorOpMultiplicandCongruous(extent[0]); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { int tc = coord.contiguous() / 32; int ts = coord.strided() / 4; int c = (coord.contiguous() % 32) / kElementsPerAccess; int s = coord.strided() % 4; LongIndex offset = (c ^ (2 * s)) * kElementsPerAccess + s * stride_[0] + tc * 32 + ts * stride_[0] * 4 + coord.contiguous() % 4; return offset; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return extent[1] * stride_[0]; } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a column-major view of pitch-linear memory to /// TensorOpMultiplicand template <int ElementSize, int Crosswise> struct ColumnMajorTensorOpMultiplicandCongruous { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCongruous<ElementSize, Crosswise>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int 
const kElementsPerAccess = Base::kElementsPerAccess; static int const kCrosswise = Base::kCrosswise; static int const kFactor = Base::kFactor; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static ColumnMajorTensorOpMultiplicandCongruous packed(TensorCoord const &extent) { return ColumnMajorTensorOpMultiplicandCongruous(extent.row()); } /// Returns the offset of a coordinate in linear memory. /// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.row(), coord.column())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.contiguous(), coord.strided()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a row-major view of pitch-linear memory to /// TensorOpMultiplicand template <int ElementSize, int Crosswise> struct RowMajorTensorOpMultiplicandCongruous { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCongruous<ElementSize, Crosswise>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; static int const kCrosswise = Base::kCrosswise; static int const kFactor = Base::kFactor; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCongruous(Index ldm = 0): layout_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCongruous(Stride stride): layout_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static RowMajorTensorOpMultiplicandCongruous packed(TensorCoord const &extent) { return RowMajorTensorOpMultiplicandCongruous(extent.column()); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.column(), coord.row())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.strided(), coord.contiguous()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear /// memory and Crosswise size (in elements). template <int ElementSize, int Crosswise> struct TensorOpMultiplicandCrosswise { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicand<ElementSize, Crosswise>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; static int const kCrosswise = Base::kCrosswise; static int const kFactor = Base::kFactor; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicandCrosswise packed(TensorCoord const &extent) { return TensorOpMultiplicandCrosswise(extent[0]); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(coord); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return coord; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(extent); } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a column-major view of pitch-linear memory to /// TensorOpMultiplicandCrosswise template <int ElementSize, int Crosswise> struct ColumnMajorTensorOpMultiplicandCrosswise { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCrosswise<ElementSize, Crosswise>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE ColumnMajorTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static ColumnMajorTensorOpMultiplicandCrosswise packed( TensorCoord const &extent) { return ColumnMajorTensorOpMultiplicandCrosswise(extent.row()); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.row(), coord.column())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.contiguous(), coord.strided()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.row(), extent.column())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template mapping a row-major view of pitch-linear memory to /// TensorOpMultiplicandCrosswise template <int ElementSize, int Crosswise> struct RowMajorTensorOpMultiplicandCrosswise { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = MatrixCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // using Base = TensorOpMultiplicandCrosswise<ElementSize, Crosswise>; /// This layout is optimized for 128b accesses static int const kAccessSize = Base::kAccessSize; using TileShape = typename Base::TileShape; using PartitionShape = typename Base::PartitionShape; // // Static constants // static int const kElementSize = Base::kElementSize; static int const kElementsPerAccess = Base::kElementsPerAccess; using PartitionCount = typename Base::PartitionCount; using AccessCount = typename Base::AccessCount; private: // // Data members // Base layout_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCrosswise(Index ldm = 0) : layout_(ldm) {} /// Ctor CUTLASS_HOST_DEVICE RowMajorTensorOpMultiplicandCrosswise(Stride stride) : layout_(stride) {} /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static RowMajorTensorOpMultiplicandCrosswise packed( TensorCoord const &extent) { return RowMajorTensorOpMultiplicandCrosswise(extent.column()); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { return layout_(PitchLinearCoord(coord.column(), coord.row())); } /// Inverse of layout function, mapping linear offset to logical coordinate CUTLASS_HOST_DEVICE TensorCoord inverse(LongIndex offset) const { PitchLinearCoord coord = layout_.inverse(offset); return MatrixCoord(coord.strided(), coord.contiguous()); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride &stride() { return layout_.stride(); } /// Compute the number of contiguous elements needed to store a tensor with /// the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return layout_.capacity(PitchLinearCoord(extent.column(), extent.row())); } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear memory. template <int ElementSize, int InterleavedK> struct TensorOpMultiplicandColumnMajorInterleaved { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // /// This layout is optimized for 128b accesses static int const kAccessSize = 128; // // Static constants // static int const kElementSize = ElementSize; static int const kElementsPerAccess = kAccessSize / kElementSize; //static int const kThreadBlockStrided = ThreadBlockStrided; static int const kInterleavedK = InterleavedK; private: // // Data members // /// Stride data member Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandColumnMajorInterleaved(Index ldm = 0): stride_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandColumnMajorInterleaved(Stride stride): stride_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicandColumnMajorInterleaved packed(TensorCoord const &extent) { return TensorOpMultiplicandColumnMajorInterleaved(extent[0] * kInterleavedK); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { int const rows_per_smem_cache_line = 128 / kInterleavedK; int row_id = coord.strided() / rows_per_smem_cache_line; int col_id = (coord.strided() % rows_per_smem_cache_line) * kInterleavedK + coord.contiguous(); int access_block_id = col_id >> 4; int swizzle_access_block_id = access_block_id ^ (row_id & 1); int swizzle_col_id = swizzle_access_block_id << 4; return row_id * 128 + swizzle_col_id; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return (extent[1] / kInterleavedK) * stride_[0]; } }; //////////////////////////////////////////////////////////////////////////////// /// Template based on element size (in bits) - defined in terms of pitch-linear memory. template <int ElementSize, int InterleavedK> struct TensorOpMultiplicandRowMajorInterleaved { /// Logical rank of tensor static int const kRank = 2; /// Rank of stride vector static int const kStrideRank = 1; /// Index type used for coordinates using Index = int32_t; /// Long index type used for offsets using LongIndex = int64_t; /// Logical coordinate using TensorCoord = PitchLinearCoord; /// Stride vector using Stride = Coord<kStrideRank, Index, LongIndex>; // // Invariants // /// This layout is optimized for 128b accesses static int const kAccessSize = 128; // // Static constants // static int const kElementSize = ElementSize; static int const kElementsPerAccess = kAccessSize / kElementSize; //static int const kThreadBlockStrided = ThreadBlockStrided; static int const kInterleavedK = InterleavedK; private: // // Data members // /// Stride data member Stride stride_; public: // // Methods // /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandRowMajorInterleaved(Index ldm = 0): stride_(ldm) { } /// Ctor CUTLASS_HOST_DEVICE TensorOpMultiplicandRowMajorInterleaved(Stride stride): stride_(stride) { } /// Helper returns a layout to a tightly packed tensor CUTLASS_HOST_DEVICE static TensorOpMultiplicandRowMajorInterleaved packed(TensorCoord const &extent) { return TensorOpMultiplicandRowMajorInterleaved(extent[1] * kInterleavedK); } /// Returns the offset of a coordinate in linear memory. 
/// Assumes coordinate has convention (contiguous, strided) CUTLASS_HOST_DEVICE LongIndex operator()(TensorCoord const &coord) const { int const rows_per_smem_cache_line = 128 / kInterleavedK; int row_id = coord.strided() / rows_per_smem_cache_line; int col_id = (coord.strided() % rows_per_smem_cache_line) * kInterleavedK + coord.contiguous(); int access_block_id = col_id >> 4; int swizzle_access_block_id = access_block_id ^ (row_id & 1); int swizzle_col_id = swizzle_access_block_id << 4; return row_id * 128 + swizzle_col_id; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride stride() const { return stride_; } /// Returns the stride of the layout CUTLASS_HOST_DEVICE Stride & stride() { return stride_; } /// Compute the number of contiguous elements needed to store a tensor with the given size CUTLASS_HOST_DEVICE LongIndex capacity(TensorCoord const &extent) const { return (extent[0] / kInterleavedK) * stride_[0]; } }; //////////////////////////////////////////////////////////////////////////////// } // namespace layout } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
include/cutlass/layout/tensor_op_multiplicand_sm75.h/0
{ "file_path": "include/cutlass/layout/tensor_op_multiplicand_sm75.h", "repo_id": "include", "token_count": 10607 }
38
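The congruous TF32 specialization above computes a bank-conflict-avoiding shared-memory offset with an XOR swizzle. The function below is a standalone host-side re-implementation of that operator() for illustration only (kElementsPerAccess = 128/32 = 4; the stride argument plays the role of stride_[0]; the function name is my own); it is not part of CUTLASS.

#include <cstdint>
#include <cstdio>

// Mirrors the offset formula of TensorOpMultiplicandCongruous<32, Crosswise>::operator() above.
int64_t tf32_congruous_offset(int contiguous, int strided, int64_t stride) {
  int const kElementsPerAccess = 128 / 32;     // 4 elements per 128b access

  int tc = contiguous / 32;
  int ts = strided / 4;
  int c  = (contiguous % 32) / kElementsPerAccess;
  int s  = strided % 4;

  return (c ^ (2 * s)) * kElementsPerAccess    // XOR swizzle permutes the access slot per row
         + s * stride + tc * 32 + ts * stride * 4
         + contiguous % 4;
}

int main() {
  int64_t const stride = 32;
  // Offsets of the first access in each of four strided rows: the XOR term
  // permutes which 128b slot the access lands in within its row.
  for (int s = 0; s < 4; ++s) {
    std::printf("row %d -> offset %lld\n", s,
                static_cast<long long>(tf32_congruous_offset(0, s, stride)));
  }
  return 0;
}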
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Kernel performing a reduction over densely packed tensors in global memory */ #pragma once #include "cutlass/device_kernel.h" #include "cutlass/reduction/kernel/reduce_split_k.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace reduction { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ReductionKernel_ > class ReduceSplitK { public: using ReductionKernel = ReductionKernel_; using Shape = typename ReductionKernel::Shape; using ReductionOp = typename ReductionKernel::ReductionOp; using OutputOp = typename ReductionKernel::OutputOp; using ElementWorkspace = typename ReductionKernel::ElementWorkspace; using ElementAccumulator = typename ReductionKernel::ElementAccumulator; using ElementOutput = typename ReductionKernel::ElementOutput; using WorkspaceTensorRef = typename ReductionKernel::WorkspaceTensorRef; using OutputTensorRef = typename ReductionKernel::OutputTensorRef; using StrideIndex = typename ReductionKernel::StrideIndex; /// Argument structure struct Arguments { // // Data members // MatrixCoord problem_size{0,0}; int partitions{1}; size_t partition_stride{0}; WorkspaceTensorRef workspace{}; OutputTensorRef destination{}; OutputTensorRef source{}; typename OutputOp::Params output{}; typename ReductionOp::Params reduction{}; // // Methods // /// Default ctor Arguments() = default; CUTLASS_HOST_DEVICE Arguments( MatrixCoord const & problem_size ): problem_size(problem_size) { } CUTLASS_HOST_DEVICE Arguments( MatrixCoord problem_size_, int partitions_, size_t partition_stride_, WorkspaceTensorRef workspace_, OutputTensorRef destination_, OutputTensorRef source_, typename OutputOp::Params output_ = typename OutputOp::Params(), typename ReductionOp::Params reduction_ = typename ReductionOp::Params() ): problem_size(problem_size_), partitions(partitions_), partition_stride(partition_stride_), workspace(workspace_), destination(destination_), source(source_), output(output_), reduction(reduction_) { } }; private: /// Kernel parameters object typename ReductionKernel::Params params_; public: /// Constructs Reduction SplitK ReduceSplitK() { } /// Determines whether the ReduceSplitK can execute the given problem. static Status can_implement(Arguments const &args) { return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { // needs no additional workspace return 0; } /// Initializes Reduction state from arguments. Status initialize( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { // initialize the params structure from the arguments params_ = typename ReductionKernel::Params( args.problem_size, args.partitions, args.partition_stride, args.workspace, args.destination, args.source, args.output, args.reduction ); return Status::kSuccess; } /// Initializes Reduction kernel state from arguments. Status update(Arguments const &args, void *workspace = nullptr) { // update the params structure from the arguments params_.workspace.reset(args.workspace.non_const_ref().data()); params_.destination.reset(args.destination.non_const_ref().data()); params_.source.reset(args.source.non_const_ref().data()); params_.output = args.output; params_.reduction = args.reduction; return Status::kSuccess; } /// Runs the kernel using initialized state. 
Status run(cudaStream_t stream = nullptr) { // // Launch reduction kernel // dim3 block = ReductionKernel::block_shape(); dim3 grid = ReductionKernel::grid_shape(params_.problem_size); Kernel<ReductionKernel><<< grid, block, 0, stream >>>(params_); cudaError_t result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Runs the kernel using initialized state. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace reduction } // namespace cutlass
include/cutlass/reduction/device/reduce_split_k.h/0
{ "file_path": "include/cutlass/reduction/device/reduce_split_k.h", "repo_id": "include", "token_count": 2047 }
39
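ReduceSplitK launches a kernel that folds the per-partition partial accumulators produced by a split-K GEMM back into a single output tensor. As a rough host-side reference of that behavior (an illustration under assumed defaults, a plus<> reduction and a linear-combination output op, not the CUTLASS kernel itself; the function name is my own), the loop below reduces a workspace laid out as `partitions` slices of `partition_stride` elements.

#include <cstddef>
#include <cstdio>
#include <vector>

// Host-side reference of a split-K reduction: sum the per-partition partial
// accumulators stored back-to-back in 'workspace', then apply a linear
// combination against an optional source tensor.
void reduce_split_k_reference(std::vector<float> const &workspace,
                              std::vector<float> const &source,
                              std::vector<float> &destination,
                              int partitions, std::size_t partition_stride,
                              std::size_t elements, float alpha, float beta) {
  for (std::size_t i = 0; i < elements; ++i) {
    float accum = 0.f;
    for (int p = 0; p < partitions; ++p) {
      accum += workspace[p * partition_stride + i];    // ReductionOp assumed to be plus<>
    }
    destination[i] = alpha * accum + beta * source[i]; // OutputOp assumed to be a linear combination
  }
}

int main() {
  std::size_t const elements = 4;
  int const partitions = 3;
  std::vector<float> workspace(partitions * elements, 1.0f); // each partition contributed 1.0
  std::vector<float> source(elements, 10.0f);
  std::vector<float> destination(elements, 0.0f);

  reduce_split_k_reference(workspace, source, destination, partitions, elements, elements, 1.0f, 0.5f);

  for (float d : destination) {
    std::printf("%g\n", d);  // prints 8 = 3 * 1 + 0.5 * 10
  }
  return 0;
}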
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines a structure containing strides, bounds, and a pointer to tensor data. */ #pragma once #include <cstdint> #include "cutlass/cutlass.h" #include "cutlass/complex.h" #include "cutlass/tensor_ref.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Element_> struct PlanarComplexReference { // // Type definitions // using Element = Element_; using ComplexElement = complex<Element>; // // Data members // Element *real; Element *imag; // // Methods // CUTLASS_HOST_DEVICE PlanarComplexReference( Element *real_ = nullptr, Element *imag_ = nullptr ): real(real_), imag(imag_) { } /// Loads the complex element CUTLASS_HOST_DEVICE operator complex<Element>() const { return complex<Element>{*real, *imag}; } /// Stores a complex element to the location pointed to by the reference CUTLASS_HOST_DEVICE PlanarComplexReference &operator=(complex<Element> const &rhs) { *real = rhs.real(); *imag = rhs.imag(); return *this; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// /* \brief TensorRef is a template for objects pointing to the start of tensors of arbitrary rank and layout within memory. 
A TensorRef combines a pointer and a Layout concept */ template < /// Data type of element stored within tensor (concept: NumericType) typename Element_, /// Defines a mapping from logical coordinate to linear memory (concept: Layout) typename Layout_ > class TensorRefPlanarComplex { public: /// Data type of individual access using Element = Element_; /// Complex element type using ComplexElement = complex<Element>; /// Mapping function from logical coordinate to linear memory using Layout = Layout_; static_assert(sizeof_bits<Element>::value >= 8, "Planar complex not suitable for subbyte elements at this time"); /// Reference type to an element using Reference = PlanarComplexReference<Element>; /// Logical rank of tensor index space static int const kRank = Layout::kRank; /// Index type using Index = typename Layout::Index; /// Long index used for pointer offsets using LongIndex = typename Layout::LongIndex; /// Coordinate in logical tensor space using TensorCoord = typename Layout::TensorCoord; /// Layout's stride vector using Stride = typename Layout::Stride; /// TensorRef to constant data using ConstTensorRef = TensorRefPlanarComplex< typename platform::remove_const<Element>::type const, Layout>; /// TensorRef to non-constant data using NonConstTensorRef = TensorRefPlanarComplex< typename platform::remove_const<Element>::type, Layout>; /// Require at least rank=1. Mathematically, a rank=0 tensor would be considered to be a /// scalar, but degenerate cases such as these are difficult to accommodate without /// extensive C++ metaprogramming or support for zero-length arrays. static_assert(kRank > 0, "Cannot define a zero-rank TensorRef"); private: /// Pointer Element* ptr_; /// Layout object maps logical coordinates to linear offsets Layout layout_; /// Offset to imaginary part LongIndex imaginary_stride_; public: // // Methods // /// Constructs a TensorRef with a pointer and layout object. CUTLASS_HOST_DEVICE TensorRefPlanarComplex( Element *ptr = nullptr, ///< pointer to start of tensor Layout const &layout = Layout(), ///< layout object containing stride and mapping function LongIndex imaginary_stride = 0 ): ptr_(ptr), layout_(layout), imaginary_stride_(imaginary_stride) { } /// Converting constructor from TensorRef to non-constant data. CUTLASS_HOST_DEVICE TensorRefPlanarComplex( NonConstTensorRef const &ref ///< TensorRef to non-const data ): ptr_(ref.data()), layout_(ref.layout()), imaginary_stride_(ref.imaginary_stride_) { } /// Returns a reference to constant-valued tensor. 
CUTLASS_HOST_DEVICE ConstTensorRef const_ref() const { return ConstTensorRef(ptr_, layout_, imaginary_stride_); } CUTLASS_HOST_DEVICE NonConstTensorRef non_const_ref() const { return NonConstTensorRef( const_cast<typename platform::remove_const<Element>::type *>(ptr_), layout_, imaginary_stride_); } /// Updates only the pointer CUTLASS_HOST_DEVICE void reset(Element* ptr = nullptr, LongIndex imaginary_stride = 0) { ptr_ = ptr; imaginary_stride_ = imaginary_stride; } /// Updates the pointer and layout object CUTLASS_HOST_DEVICE void reset(Element* ptr, Layout const &layout, LongIndex imaginary_stride) { ptr_ = ptr; layout_ = layout; imaginary_stride_ = imaginary_stride; } /// Returns true if the TensorRef is non-null CUTLASS_HOST_DEVICE bool good() const { return ptr_ != nullptr; } /// Returns the pointer to referenced data CUTLASS_HOST_DEVICE Element * data() const { return ptr_; } /// Returns the pointer to referenced data CUTLASS_HOST_DEVICE Element * imaginary_data() const { return ptr_ + imaginary_stride_; } /// Returns a reference to the element at a given linear index CUTLASS_HOST_DEVICE Reference data(LongIndex idx) const { return Reference(ptr_ + idx, ptr_ + idx + imaginary_stride_); } /// Returns the layout object CUTLASS_HOST_DEVICE Layout & layout() { return layout_; } /// Returns the layout object CUTLASS_HOST_DEVICE Layout layout() const { return layout_; } /// Gets the stride to an imaginary element LongIndex imaginary_stride() const { return imaginary_stride_; } /// Gets the stride to an imaginary element LongIndex &imaginary_stride() { return imaginary_stride_; } /// Returns the layout object's stride vector CUTLASS_HOST_DEVICE Stride stride() const { return layout_.stride(); } /// Returns the layout object's stride vector CUTLASS_HOST_DEVICE Stride & stride() { return layout_.stride(); } /// Returns the layout object's stride in a given physical dimension CUTLASS_HOST_DEVICE Index stride(int dim) const { return layout_.stride().at(dim); } /// Returns the layout object's stride in a given physical dimension CUTLASS_HOST_DEVICE Index & stride(int dim) { return layout_.stride().at(dim); } /// Computes the offset of an index from the origin of the tensor CUTLASS_HOST_DEVICE LongIndex offset(TensorCoord const& coord) const { return layout_(coord); } /// Returns a reference to the element at a given Coord CUTLASS_HOST_DEVICE Reference at(TensorCoord const& coord) const { return data(offset(coord)); } /// Returns a reference to the element at a given Coord CUTLASS_HOST_DEVICE Reference operator[](TensorCoord const& coord) const { return data(offset(coord)); } /// Adds an offset to each pointer CUTLASS_HOST_DEVICE TensorRefPlanarComplex & add_pointer_offset(LongIndex offset_) { ptr_ += offset_; return *this; } /// Adds an offset to each pointer CUTLASS_HOST_DEVICE TensorRefPlanarComplex & add_coord_offset(TensorCoord const &coord) { add_pointer_offset(offset(coord)); return *this; } /// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorRefPlanarComplex operator+(TensorCoord const& b) const { TensorRefPlanarComplex result(*this); result.add_coord_offset(b); return result; } /// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorRefPlanarComplex & operator+=(TensorCoord const& b) { add_coord_offset(b); return *this; } /// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorRefPlanarComplex operator-(TensorCoord const& b) const { TensorRefPlanarComplex result(*this); result.add_pointer_offset(-offset(b)); return result; } 
/// Returns a TensorRef offset by a given amount CUTLASS_HOST_DEVICE TensorRefPlanarComplex & operator-=(TensorCoord const& b) { add_pointer_offset(-offset(b)); return *this; } /// TensorRef to real-valued tensor CUTLASS_HOST_DEVICE cutlass::TensorRef<Element, Layout> ref_real() const { return cutlass::TensorRef<Element, Layout>(data(), layout()); } /// TensorRef to real-valued tensor CUTLASS_HOST_DEVICE cutlass::TensorRef<Element, Layout> ref_imag() const { return cutlass::TensorRef<Element, Layout>(imaginary_data(), layout()); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// /// Constructs a TensorRef, deducing types from arguments. template < typename Element, typename Layout > CUTLASS_HOST_DEVICE TensorRefPlanarComplex<Element, Layout> make_TensorRefPlanarComplex( Element *ptr, Layout const &layout, int64_t imaginary_stride) { return TensorRefPlanarComplex<Element, Layout>(ptr, layout, imaginary_stride); } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/tensor_ref_planar_complex.h/0
{ "file_path": "include/cutlass/tensor_ref_planar_complex.h", "repo_id": "include", "token_count": 3414 }
40
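The `TensorRefPlanarComplex` defined above splits a complex tensor into a real plane and an imaginary plane separated by `imaginary_stride`, and `make_TensorRefPlanarComplex` deduces the template arguments. As a quick illustration, here is a minimal host-side sketch (not taken from the library's own tests) of viewing a single buffer through this class; the matrix extents, the row-major layout, and the host-only compilation are arbitrary choices for the example, and it assumes the CUTLASS headers are on the include path.

```c++
// Minimal sketch: one contiguous buffer holding all real parts followed by all
// imaginary parts, viewed through TensorRefPlanarComplex.
#include <vector>

#include "cutlass/layout/matrix.h"
#include "cutlass/tensor_ref_planar_complex.h"

int main() {
  int const M = 4, N = 8;

  // Planar-complex storage: M*N real elements, then M*N imaginary elements.
  std::vector<float> storage(2 * M * N, 0.0f);

  // The imaginary plane begins M * N elements after the real plane.
  auto ref = cutlass::make_TensorRefPlanarComplex(
      storage.data(),
      cutlass::layout::RowMajor(N),  // leading dimension of each plane
      M * N);                        // imaginary_stride

  // ref_real() / ref_imag() expose each plane as an ordinary TensorRef.
  ref.ref_real().at({1, 2}) = 3.0f;
  ref.ref_imag().at({1, 2}) = -1.0f;

  return 0;
}
```

Element accesses could equally go through `ref.at({row, column})`, which (per the `data(LongIndex)` method above) returns a `PlanarComplexReference` pairing the real location with the one offset by `imaginary_stride`.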
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates calculating the address and predicates to the load of tiles from pitch-linear rank=2 tensors. This iterator uses masks to guard out-of-bounds accesses and visits the last "residue" tile first, with the objective of minimizing predicate mask updates during steady-state operation. A precomputed "Params" object minimizes the amount of state that must be stored in registers, and integer addition is used to advance the pointer through memory. */ #pragma once #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/matrix_shape.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h" //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// PredicatedTileAccessIterator2dThreadTile /// template <typename Shape, typename Element, typename Layout, int AdvanceRank, typename ThreadMap, typename AccessType> class PredicatedTileAccessIterator2dThreadTile; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator2dThreadTile for pitch-linear data. 
/// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIterator2dThreadTile<Shape_, Element_, layout::PitchLinear, AdvanceRank, ThreadMap_, AccessType_> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::PitchLinear; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using StrideIndex = typename Layout::Stride::Index; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; static int const kPredicatesPerByte = 4; static int const kPredicatesPerWord = 4 * kPredicatesPerByte; /// Number of 32b words containing predicates static int const kPredicateByteCount = (ThreadMap::Iterations::kCount * ThreadMap::ThreadAccessShape::kStrided + kPredicatesPerByte - 1) / kPredicatesPerByte; static int const kPredicateWordCount = (kPredicateByteCount + 3) / 4; static unsigned const kPredicateMask = (1u << kPredicatesPerByte) - 1u; static_assert(kPredicateWordCount <= 4, "Too many predicates."); /// Predicate vector stores mask to guard accesses using Mask = Array<uint32_t, kPredicateWordCount>; /// Uses a non-template class struct Params : PredicatedTileAccessIteratorParams { public: friend PredicatedTileAccessIterator2dThreadTile; using Base = PredicatedTileAccessIteratorParams; // Default ctor CUTLASS_HOST_DEVICE Params() { } /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : Base(layout.stride(0), MakePredicatedTileAccessIteratorDesc<Shape, Element, Layout, kAdvanceRank, ThreadMap>()() ) { } CUTLASS_HOST_DEVICE Params(Base const &base) : Base(base) { } }; private: /// Internal pointer type permits fast address arithmetic using BytePointer = char *; private: // // Data members // /// Parameters object with precomputed internal state Params const &params_; /// Internal pointer to first access of tile BytePointer pointer_; /// Guard predicates uint32_t predicates_[kPredicateWordCount]; /// Size of tensor TensorCoord extent_; /// Initial offset for each thread TensorCoord thread_offset_; /// Index of residue tile int residue_tile_idx_; /// Used for out-of-order visitation bool is_residue_tile_; /// Iteration in the contiguous dimension int iteration_contiguous_; /// Iteration in the strided dimension int iteration_strided_; /// Tracks iterations within the thread loop int iteration_thread_; private: /// Computes predicates based on internally tracked per-thread offset. 
CUTLASS_HOST_DEVICE void compute_predicates_( /// optionally, simplify predicate calculation during 'steady state' phase bool is_steady_state = false) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = 0u; } CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { CUTLASS_PRAGMA_UNROLL for (int ts = 0; ts < ThreadMap::ThreadAccessShape::kStrided; ts++) { TensorCoord iteration_coord(c * ThreadMap::Delta::kContiguous, ts + s * ThreadMap::Delta::kStrided); TensorCoord coord = thread_offset_ + iteration_coord; bool guard; if (is_steady_state) { if (kAdvanceRank == 0) { guard = (coord.strided() < extent_.strided()); } else { guard = (coord.contiguous() < extent_.contiguous()); } } else { guard = (coord.strided() < extent_.strided() && coord.contiguous() < extent_.contiguous()); } int pred_idx = ts + c * ThreadMap::ThreadAccessShape::kStrided + s * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided; int word_idx = pred_idx / kPredicatesPerWord; int residual = pred_idx % kPredicatesPerWord; int byte_idx = residual / kPredicatesPerByte; int bit_idx = residual % kPredicatesPerByte; predicates_[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx)); } } } } public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, /// ID of each participating thread int thread_id, /// Initial offset of threadblock TensorCoord const &threadblock_offset) : params_(params), pointer_(reinterpret_cast<BytePointer>( const_cast<NonConstPointer>(pointer))), extent_(extent), is_residue_tile_(true) { TensorCoord residue_offset; if (kAdvanceRank) { residue_tile_idx_ = (extent_[kAdvanceRank] - threadblock_offset[kAdvanceRank] - 1) / Shape::kStrided; residue_offset = make_Coord(0, residue_tile_idx_ * Shape::kStrided); } else { residue_tile_idx_ = (extent_[kAdvanceRank] - threadblock_offset[kAdvanceRank] - 1) / Shape::kContiguous; residue_offset = make_Coord(residue_tile_idx_ * Shape::kContiguous, 0); } // Per-thread offset in logical coordinates of tensor thread_offset_ = threadblock_offset + residue_offset + ThreadMap::initial_offset(thread_id); // update internal pointers Layout layout(params_.stride_); add_pointer_offset(layout(thread_offset_)); compute_predicates_(false); set_iteration_index(0); } /// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile( /// Precomputed parameters object Params const &params, /// Pointer to start of tensor Pointer pointer, /// Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id) : PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { int residual = index % (ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided); iteration_strided_ = index / (ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided); iteration_contiguous_ = residual / ThreadMap::ThreadAccessShape::kStrided; iteration_thread_ = residual % ThreadMap::ThreadAccessShape::kStrided; } /// Adds a pointer 
offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += int(sizeof(Element)) * pointer_offset; } /// Advances an iterator along logical dimensions of matrix in units of whole tiles CUTLASS_DEVICE void add_tile_offset( TensorCoord const &tile_offset) { if (is_residue_tile_) { TensorCoord residue_offset; if (kAdvanceRank) { residue_offset = TensorCoord(0, residue_tile_idx_ * Shape::kStrided); } else { residue_offset = TensorCoord(residue_tile_idx_ * Shape::kContiguous, 0); } thread_offset_ -= residue_offset; Layout layout(params_.stride_); add_pointer_offset(-layout(residue_offset)); compute_predicates_(true); if (kAdvanceRank) { pointer_ += params_.inc_advance_ * (tile_offset.strided() - 1); pointer_ += Shape::kContiguous * tile_offset.contiguous(); } else { pointer_ += params_.inc_advance_ * (tile_offset.contiguous() - 1); pointer_ += Shape::kStrided * tile_offset.strided(); } } else { if (kAdvanceRank) { pointer_ += params_.inc_advance_ * tile_offset.strided(); pointer_ += Shape::kContiguous * tile_offset.contiguous(); } else { pointer_ += params_.inc_advance_ * tile_offset.contiguous(); pointer_ += Shape::kStrided * tile_offset.strided(); } } is_residue_tile_ = false; } CUTLASS_HOST_DEVICE AccessType *get() const { AccessType *ret_val = reinterpret_cast<AccessType *>( pointer_ + (iteration_thread_ * params_.stride_ + iteration_contiguous_ * ThreadMap::Delta::kContiguous) * int(sizeof(Element))); return ret_val; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile &operator++() { iteration_thread_++; if (iteration_thread_ < ThreadMap::ThreadAccessShape::kStrided) return *this; iteration_thread_ = 0; ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) return *this; // Enter here only if (iteration_contiguous_ == // ThreadMap::Iteration::kContiguous) iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { pointer_ += params_.inc_strided_; return *this; } // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) // which means we enter the next tile. iteration_strided_ = 0; // advance to next tile pointer_ += params_.inc_next_; // now return to start tile - if the iterator is subsequently advanced, this // subtraction as well as the subsequent integer addition are both elided by // the compiler. pointer_ -= params_.inc_advance_; return *this; } /// Increment and return an instance to self. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile operator++(int) { PredicatedTileAccessIterator2dThreadTile self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = enable ? 
0u : predicates_[i]; } } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = 0xffffffff; } } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { predicates_[i] = mask[i]; } } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kPredicateWordCount; ++i) { mask[i] = predicates_[i]; } } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { int pred_idx = iteration_thread_ + iteration_contiguous_ * ThreadMap::ThreadAccessShape::kStrided + iteration_strided_ * ThreadMap::Iterations::kContiguous * ThreadMap::ThreadAccessShape::kStrided; int word_idx = pred_idx / kPredicatesPerWord; int residual = pred_idx % kPredicatesPerWord; int byte_idx = residual / kPredicatesPerByte; int bit_idx = residual % kPredicatesPerByte; bool pred = (predicates_[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0; return pred; } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator2dThreadTile for pitch-linear data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIterator2dThreadTile<Shape_, Element_, layout::ColumnMajor, AdvanceRank, ThreadMap_, AccessType_> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedTileAccessIterator2dThreadTile< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap, AccessType>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator2dThreadTile; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default ctor CUTLASS_HOST_DEVICE Params() { } /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))){} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const &base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.row(), extent.column()), thread_id, layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())) {} /// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile operator++(int) { PredicatedTileAccessIterator2dThreadTile self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// /// Specialization of PredicatedTileAccessIterator2dThreadTile for pitch-linear data. /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept | /// MaskedTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, typename AccessType_> class PredicatedTileAccessIterator2dThreadTile<Shape_, Element_, layout::RowMajor, AdvanceRank, ThreadMap_, AccessType_> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajor; static int const kAdvanceRank = AdvanceRank; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorView = TensorView<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Pointer = Element *; using NonConstPointer = typename platform::remove_const<Element>::type *; using UnderlyingIterator = PredicatedTileAccessIterator2dThreadTile< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap, AccessType>; /// Predicate vector stores mask to guard accesses using Mask = typename UnderlyingIterator::Mask; /// Parameters object is precomputed state and is host-constructible class Params { private: friend PredicatedTileAccessIterator2dThreadTile; /// Parameters object typename UnderlyingIterator::Params params_; public: /// Default ctor CUTLASS_HOST_DEVICE Params() { } /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(Layout const &layout) : params_(layout::PitchLinear(layout.stride(0))){} /// Construct the Params object given a pitch-linear tensor's layout CUTLASS_HOST_DEVICE Params(typename UnderlyingIterator::Params::Base const &base) : params_(base) {} }; private: // // Data members // /// Underlying pitch-linear tile iterator UnderlyingIterator iterator_; public: /// Constructs a TileIterator from its precomputed state, threadblock offset, /// and thread ID CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile( ///< Precomputed parameters object Params const &params, ///< Pointer to start of tensor Pointer pointer, ///< Extent of tensor TensorCoord extent, ///< ID of each participating thread int thread_id, ///< Initial offset of threadblock TensorCoord const &threadblock_offset) : iterator_(params.params_, pointer, layout::PitchLinearCoord(extent.column(), extent.row()), thread_id, layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())) {} /// Construct a PredicatedTileAccessIterator2dThreadTile with zero threadblock offset CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile( Params const &params, ///< Precomputed parameters object Pointer pointer, ///< Pointer to start of tensor TensorCoord extent, ///< Extent of tensor int thread_id ///< ID of each participating thread ) : PredicatedTileAccessIterator2dThreadTile(params, pointer, extent, thread_id, make_Coord(0, 0)) {} /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(int index) { iterator_.set_iteration_index(index); } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances an iterator along logical dimensions of matrix in units of whole /// tiles CUTLASS_HOST_DEVICE void add_tile_offset(TensorCoord const &tile_offset) { iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); } /// Returns a pointer CUTLASS_HOST_DEVICE AccessType *get() const { return reinterpret_cast<AccessType *>(iterator_.get()); } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. /// /// The first time this method is called, predicates are updated, and the /// iterator's internal pointer is reverted to the first "steady state" tile. /// Subsequent calls are lightweight and must only update the internal /// pointer. 
CUTLASS_HOST_DEVICE PredicatedTileAccessIterator2dThreadTile operator++(int) { PredicatedTileAccessIterator2dThreadTile self(*this); operator++(); return self; } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } /// Clears the predicate set efficiently CUTLASS_HOST_DEVICE void enable_mask() { iterator_.enable_mask(); } /// Sets the predicate mask, overriding value stored in predicate iterator CUTLASS_HOST_DEVICE void set_mask(Mask const &mask) { iterator_.set_mask(mask); } /// Gets the mask CUTLASS_HOST_DEVICE void get_mask(Mask &mask) { iterator_.get_mask(mask); } /// Returns whether access is valid or not CUTLASS_HOST_DEVICE bool valid() { return iterator_.valid(); } }; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
include/cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h/0
{ "file_path": "include/cutlass/transform/threadblock/predicated_tile_access_iterator_2dthreadtile.h", "repo_id": "include", "token_count": 9733 }
41
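A detail worth calling out in the pitch-linear specialization above is how the guard predicates are packed: four predicates per byte, hence sixteen per 32-bit word, which is exactly what the `word_idx` / `byte_idx` / `bit_idx` arithmetic in `compute_predicates_()` and `valid()` computes. The following standalone sketch mirrors that arithmetic outside of CUTLASS; the access count and the stand-in guard condition are made up purely for illustration.

```c++
// Standalone mirror of the predicate packing above: 4 predicates per byte,
// 16 per 32-bit word. Access count and guard condition are illustrative only.
#include <cstdint>
#include <cstdio>

int main() {
  int const kPredicatesPerByte = 4;
  int const kPredicatesPerWord = 4 * kPredicatesPerByte;  // 16

  std::uint32_t predicates[4] = {0u, 0u, 0u, 0u};

  // Pack one guard bit per access, as compute_predicates_() does.
  for (int pred_idx = 0; pred_idx < 24; ++pred_idx) {
    bool guard = (pred_idx % 3 != 0);  // stand-in for the bounds check

    int word_idx = pred_idx / kPredicatesPerWord;
    int residual = pred_idx % kPredicatesPerWord;
    int byte_idx = residual / kPredicatesPerByte;
    int bit_idx  = residual % kPredicatesPerByte;

    predicates[word_idx] |= (unsigned(guard) << (byte_idx * 8 + bit_idx));
  }

  // Test one access the same way valid() does.
  int pred_idx = 7;
  int word_idx = pred_idx / kPredicatesPerWord;
  int residual = pred_idx % kPredicatesPerWord;
  int byte_idx = residual / kPredicatesPerByte;
  int bit_idx  = residual % kPredicatesPerByte;
  bool is_valid = (predicates[word_idx] & (1u << (byte_idx * 8 + bit_idx))) != 0;

  std::printf("access %d guarded valid: %s\n", pred_idx, is_valid ? "yes" : "no");
  return 0;
}
```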
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing storing of tiles from pitch-linear rank=2 tensors. */ #pragma once #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/transform/threadblock/regular_tile_access_iterator_tensor_op.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace transform { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for congruous arrangements for TensorOps /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileIterator< Shape_, Element_, layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert(AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Internal details made public to facilitate introspection struct Detail { /// This iterator is specialized for an access size that is 128 bits in length. 
static int const kAccessSizeInBits = 128; static_assert( sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits, "This iterator requires a policy whose access size is 128bs"); }; private: /// Element type per access using AccessType = Array<Element, Layout::kElementsPerAccess>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>; /// Underlying iterator to compute the addresses using TileAccessIterator = RegularTileAccessIterator<Shape, Element, Layout, kAdvanceRank, ThreadMap>; private: // // Data members // /// Data member to the tile access iterator TileAccessIterator address_iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : address_iterator_(ref, thread_id) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { address_iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { address_iterator_.add_tile_offset({0, 1}); return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); this->operator++(); return prev; } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { address_iterator_.add_tile_offset(coord); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_byte_offset(Fragment &frag, Index byte_offset) { address_iterator_.set_iteration_index(0); AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int access_idx = c + s * ThreadMap::Iterations::kContiguous; char const *byte_ptr = reinterpret_cast<char const *>(address_iterator_.get()) + byte_offset; AccessType const *access_ptr = reinterpret_cast<AccessType const *>(byte_ptr); frag_ptr[access_idx] = *access_ptr; ++address_iterator_; } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8); } CUTLASS_DEVICE void store_with_byte_offset(Fragment const &frag, Index byte_offset) { address_iterator_.set_iteration_index(0); AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int access_idx = c + s * ThreadMap::Iterations::kContiguous; char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset; AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr); *access_ptr = frag_ptr[access_idx]; ++address_iterator_; } } } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_byte_offset(frag, 0); } }; 
//////////////////////////////////////////////////////////////////////////////// /// Tile Iterator specialized for column-major congruous TensorOp formats. /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileIterator< Shape_, Element_, layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert(AdvanceRank == 0 || AdvanceRank == 1, "Specialization for column-major iterator may along advance along the " "columns(rank=0) or rows(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>, (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator( TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ): iterator_({ref.data(), ref.stride()}, thread_id) { } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.row(), coord.column()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); ++iterator_; return prev; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset( Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile Iterator specialized for row-major congruous TensorOp formats. 
/// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileIterator< Shape_, Element_, layout::RowMajorTensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert(AdvanceRank == 0 || AdvanceRank == 1, "Specialization for row-major iterator may along advance along the " "columns(rank=0) or rows(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajorTensorOpMultiplicandCongruous< sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::TensorOpMultiplicandCongruous<sizeof_bits<Element_>::value, Crosswise>, (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator( TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ): iterator_({ref.data(), ref.stride()}, thread_id) { } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.column(), coord.row()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. 
CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); ++iterator_; return prev; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset( Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for crosswise arrangements for TensorOps /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileIterator<Shape_, Element_, layout::TensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Internal details made public to facilitate introspection struct Detail { /// This iterator is specialized for an access size that is 128 bits in /// length. static int const kAccessSizeInBits = 128; static_assert(sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits, "This iterator requires a policy whose access size is 128bs"); }; private: /// Element type per access using AccessType = Array<Element, Layout::kElementsPerAccess>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>; /// Underlying iterator to compute the addresses using TileAccessIterator = RegularTileAccessIterator<Shape, Element, Layout, kAdvanceRank, ThreadMap>; private: // // Data members // /// Data member to the tile access iterator TileAccessIterator address_iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : address_iterator_(ref, thread_id) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { address_iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { address_iterator_.add_tile_offset({1, 0}); return *this; } /// Advances to the next tile in memory. 
CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); this->operator++(); return prev; } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { address_iterator_.add_tile_offset(coord); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { address_iterator_.set_iteration_index(0); AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int access_idx = c + s * ThreadMap::Iterations::kContiguous; frag_ptr[access_idx] = *(address_iterator_.get() + pointer_offset); ++address_iterator_; } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8); } CUTLASS_DEVICE void store_with_byte_offset(Fragment const &frag, Index byte_offset) { address_iterator_.set_iteration_index(0); AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int access_idx = c + s * ThreadMap::Iterations::kContiguous; char *byte_ptr = reinterpret_cast<char *>(address_iterator_.get()) + byte_offset; AccessType *access_ptr = reinterpret_cast<AccessType *>(byte_ptr); *access_ptr = frag_ptr[access_idx]; ++address_iterator_; } } } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile Iterator specialized for column-major crosswise TensorOp formats. /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileIterator<Shape_, Element_, layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for column-major iterator may along advance along the " "columns(rank=0) or rows(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileIterator< layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element, layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, Crosswise>, (kAdvanceRank == 0 ? 
0 : 1), ThreadMap_>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.row(), coord.column()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); ++iterator_; return prev; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile Iterator specialized for row-major crosswise TensorOp formats. /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int Alignment, int Crosswise> class RegularTileIterator<Shape_, Element_, layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for row-major iterator may along advance along the " "columns(rank=0) or rows(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::RowMajorTensorOpMultiplicandCrosswise< sizeof_bits<Element_>::value, Crosswise>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::TensorOpMultiplicandCrosswise<sizeof_bits<Element_>::value, Crosswise>, (kAdvanceRank == 0 ? 
1 : 0), ThreadMap_>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.column(), coord.row()}); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); ++iterator_; return prev; } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for k interleaved arrangements for TensorOps /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int InterleavedK, int Alignment> class RegularTileIterator< Shape_, Element_, layout::TensorOpMultiplicandRowMajorInterleaved<sizeof_bits<Element_>::value, InterleavedK>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::TensorOpMultiplicandRowMajorInterleaved<sizeof_bits<Element_>::value, InterleavedK>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Internal details made public to facilitate introspection struct Detail { /// This iterator is specialized for an access size that is 128 bits in /// length. 
static int const kAccessSizeInBits = 128; static_assert(sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess == kAccessSizeInBits, "This iterator requires a policy whose access size is 128bs"); }; private: /// Element type per access using AccessType = Array<Element, Layout::kElementsPerAccess>; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, ThreadMap::Iterations::kCount * Layout::kElementsPerAccess>; /// Underlying iterator to compute the addresses using TileAccessIterator = RegularTileAccessIterator<Shape, Element, Layout, kAdvanceRank, ThreadMap>; private: // // Data members // /// Data member to the tile access iterator TileAccessIterator address_iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : address_iterator_(ref, thread_id) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { address_iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { address_iterator_.add_pointer_offset(Shape::kCount); return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); this->operator++(); return prev; } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { address_iterator_.add_pointer_offset(coord.contiguous() * Shape::kCount); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { address_iterator_.set_iteration_index(0); AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int access_idx = c + s * ThreadMap::Iterations::kContiguous; frag_ptr[access_idx] = *(address_iterator_.get() + pointer_offset); ++address_iterator_; } } } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { AccessType const *frag_ptr = reinterpret_cast<AccessType const *>(&frag); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int access_idx = c + s * ThreadMap::Iterations::kContiguous; *(address_iterator_.get() + pointer_offset) = frag_ptr[access_idx]; ++address_iterator_; } } } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; //////////////////////////////////////////////////////////////////////////////// /// Tile iterator specialized for k interleaved arrangements for TensorOps /// /// /// Satisfies: ForwardTileIteratorConcept | /// ReadableContiguousTileIteratorConcept | /// WriteableContiguousTileIteratorConcept /// template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int InterleavedK, int Alignment> class RegularTileIterator< Shape_, Element_, layout::TensorOpMultiplicandColumnMajorInterleaved<sizeof_bits<Element_>::value, InterleavedK>, AdvanceRank, ThreadMap_, Alignment> { public: static_assert( 
AdvanceRank == 0 || AdvanceRank == 1, "Specialization for pitch-linear iterator may along advance along the " "contiguous(rank=0) or strided(rank=1) dimension."); using Shape = Shape_; using Element = Element_; using Layout = layout::TensorOpMultiplicandColumnMajorInterleaved<sizeof_bits<Element_>::value, InterleavedK>; static int const kAdvanceRank = AdvanceRank; static int const kAlignment = Alignment; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; using TensorRef = TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using ThreadMap = ThreadMap_; /// Underlying iterator type using UnderlyingIterator = RegularTileIterator< cutlass::MatrixShape<Shape::kColumn, Shape::kRow>, Element, layout::TensorOpMultiplicandRowMajorInterleaved<sizeof_bits<Element_>::value, InterleavedK>, (kAdvanceRank == 1 ? 0 : 1), ThreadMap >; public: /// Fragment object to be loaded or stored using Fragment = Array<Element, UnderlyingIterator::Fragment::kElements>; private: /// Underlying iterator UnderlyingIterator iterator_; public: /// Construct a TileIterator with zero threadblock offset CUTLASS_HOST_DEVICE RegularTileIterator(TensorRef ref, ///< Pointer to start of tensor int thread_id ///< ID of each participating thread ) : iterator_({ref.data(), ref.stride()}, thread_id) {} /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { iterator_.add_pointer_offset(pointer_offset); } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator &operator++() { ++iterator_; return *this; } /// Advances to the next tile in memory. CUTLASS_HOST_DEVICE RegularTileIterator operator++(int) { RegularTileIterator prev(*this); ++iterator_; return prev; } /// Adds a tile offset CUTLASS_DEVICE void add_tile_offset(TensorCoord const &coord) { iterator_.add_tile_offset({coord.strided(), coord.contiguous()}); } /// Loads a fragment from memory CUTLASS_DEVICE void load_with_pointer_offset(Fragment &frag, Index pointer_offset) { iterator_.load_with_pointer_offset(frag, pointer_offset); } /// Loads a fragment from memory CUTLASS_DEVICE void load(Fragment &frag) { load_with_pointer_offset(frag, 0); } /// Store a fragment to memory CUTLASS_DEVICE void store_with_pointer_offset(Fragment const &frag, Index pointer_offset) { iterator_.store_with_pointer_offset(frag, pointer_offset); } /// Store a fragment to memory CUTLASS_DEVICE void store(Fragment const &frag) { store_with_pointer_offset(frag, 0); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace transform } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h/0
{ "file_path": "include/cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h", "repo_id": "include", "token_count": 12671 }
42
# CuTe's support for Matrix Multiply-Accumulate instructions In this file, we explain in detail how we support our GPUs' Matrix Multiply-Accumulate (MMA) hardware instructions in CuTe. MMAs are architecture-specific. Different generations of GPU architectures introduce different sets of MMA instructions. However, CuTe features such as `Layout` makes it possible to expose MMAs for use in generic CUDA C++ code. We accomplish this in multiple steps. 1. We wrap each MMA's PTX instruction in an "Operation" struct. 2. For each Operation struct, we define a "Traits" struct that defines all of the meta-information needed to use the Operation. 3. Combining the above, an "Atom" is the combination of the PTX Operation struct with the meta-information Traits struct and provides methods to construct `cute::Tensor` "fragments" for that Operation and to use that Operation on existing `cute::Tensor`s. 4. Combining potentially multiple Atoms, a "TiledMMA" provides utilities for building more complex partitioning patterns by creating layouts and interleavings of Atoms. ## CuTe MMA Atoms CuTe exposes each MMA to generic CUDA C++ code as a pair of structs: an "Operation" struct, and an `MMA_Traits` struct templated on the Operation struct type. An "Operation" struct exposes the PTX instruction for that specific operation. It defines the arguments and interface it expects. Operation structs have minimal software dependencies -- they do not use layouts, tensors, or non-standard numeric data types -- and describe only the physical inputs and outputs to the instruction. Different structs have different names that describe what the MMA instruction does. We will explain the naming scheme below. A corresponding `MMA_Traits` struct specialization defines meta-information about the Operation, such as the logical compute types, the logical shape of the operation, and the `Layout`s of threads and values within the operation. The `MMA_Traits` struct takes the Operation as a template parameter. CuTe specializes `MMA_Traits` for each Operation type that it supports. Together, these two types comprise an "Atom" that decouples the complexity of thread and data layouts from the call site of the PTX instruction. The Atom's Traits struct exposes information that is relevant to a single MMA operation, no matter the granularity at which it operates. CuTe MMA atoms expose the semantics of a single MMA operation. This is true regardless of the hardware level at which the MMA operates. CuTe supports MMA atoms that operate at a variety of hardware levels, including * a single thread (e.g., fused multiply-add (FMA) instruction); * a quadpair (Volta); * a single warp (Ampere); and * a warpgroup (Hopper). ### Operation structs #### Location of files CuTe provides its Operations structs in the [`include/cute/arch`](../../../include/cute/arch) directory, in header files starting with `mma`. #### Operation struct's name A CuTe Operation struct's name principally encodes the PTX instruction it wraps. These often include * its first supported architecture, * the M, N, and K dimensions that it accepts, * the types that it takes, and * the arrangement of the A and B inputs. For example, the Volta section below will refer to the `SM70_8x8x4_F32F16F16F32_NT` Operation struct defined in [`include/cute/arch/mma_sm70.hpp`](../../../include/cute/arch/mma_sm70.hpp). * "SM70" refers to Volta. * "8x8x4" refers to M = 8, N = 8, and K = 4, the dimensions of the MMA operation that the quadpair performs (see below). 
This is reflected in the PTX as `.m8n8k4.`. * "F32F16F16F32" refers to the element types of the four matrix operands A, B, C, and D. An MMA computes D = C + A * B, so we read the types from left to right: D is F32 (`float`), A is F16 (half), B is F16 (half), and C is F32 (`float`). This is reflected in the PTX instruction name as `.f32.f16.f16.f32`. * "NT" means that the PTX instruction is designed for inputs A as M-major (not transposed, column-major) and inputs B as N-major (transposed, row-major). This is reflected in the PTX instruction name as `.col.row.`. #### Contents An Operation struct has the following members. ##### Type aliases An Operation struct has four public type aliases: `DRegisters`, `ARegisters`, `BRegisters`, and `CRegisters`. For example, the `SM70_8x8x4_F32F16F16F32_NT` Operation struct defined in [`include/cute/arch/mma_sm70.hpp`](../../../include/cute/arch/mma_sm70.hpp) defines these as follows. ```c++ using DRegisters = float[8]; using ARegisters = uint32_t[2]; using BRegisters = uint32_t[2]; using CRegisters = float[8]; ``` This shows how many values each thread will pass into the PTX instruction for each of the matrices A, B, C, and D. For this Operation, each thread passes 8 F32 values each for C and D (hence `float[8]`), and 4 F16 values each for A and B (hence `uint32_t[2]`; the instruction packs two 16-bit F16 values in each of the two 32-bit `uint32_t` values). ##### `fma` static member device function An operation struct defines a public `static void fma` function. It is marked with the `CUTE_HOST_DEVICE` macro, which adds the `__host__ __device__` annotations. Different Operations define `fma` to take different numbers of arguments, depending on the PTX MMA instruction. The implementation protects use of the PTX instruction with a macro, and raises an `assert` if `fma` is called when the macro is not defined. This ensures that tests and examples that use this Operation in an Atom can still compile, even if the PTX instruction is not available. ### Traits #### Location of files CuTe provides its Traits structs in the [`include/cute/atom`](../../../include/cute/atom) directory, in header files starting with `mma_traits`. #### Contents An `MMA_Traits` specialization defines the following public type aliases. * `ValTypeD`: Logical compute type of the D matrix * `ValTypeA`: Logical compute type of the A matrix * `ValTypeB`: Logical compute type of the B matrix * `ValTypeC`: Logical compute type of the C matrix * `Shape_MNK`: Logical MxNxK shape of the MMA operation * `ThrID`: Logical thread mapping within the single MMA operation (specifying the thread, quadpair, warp, or warpgroup view) * `ALayout`: Mapping of (thread,value) pairs to coordinates in the MxK A matrix * `BLayout`: Mapping of (thread,value) pairs to coordinates in the NxK B matrix * `CLayout`: Mapping of (thread,value) pairs to coordinates in the MxN C matrix #### Example The specialization of MMA_Traits for the `SM70_8x8x4_F32F16F16F32_NT` Operation lives in the header file [`include/cute/atom/mma_traits_sm70.hpp`](../../../include/cute/atom/mma_traits_sm70.hpp). It looks like this. ```c++ template <> struct MMA_Traits<SM70_8x8x4_F32F16F16F32_NT> { using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; using Shape_MNK = Shape<_8,_8,_4>; using ThrID = SM70_QuadPair; using ALayout = SM70_8x4_Col; using BLayout = SM70_8x4_Col; using CLayout = SM70_8x8_32b; }; ``` The next section will explain these type aliases in detail. 
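To make this pattern concrete, here is a simplified sketch of the overall shape of an Operation struct. This is not the actual `SM70_8x8x4_F32F16F16F32_NT` source: the guard macro name, the PTX string, and the operand bindings are abbreviated placeholders (and `CUTE_HOST_DEVICE`/`uint32_t` are assumed to come from the usual CuTe and standard headers), so read it as an outline of the interface rather than a drop-in implementation.

```c++
struct SM70_8x8x4_F32F16F16F32_NT_sketch {
  using DRegisters = float[8];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[2];
  using CRegisters = float[8];

  CUTE_HOST_DEVICE static void
  fma(float& d0, float& d1, float& d2, float& d3,
      float& d4, float& d5, float& d6, float& d7,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0, uint32_t const& b1,
      float const& c0, float const& c1, float const& c2, float const& c3,
      float const& c4, float const& c5, float const& c6, float const& c7)
  {
#if defined(CUTE_ARCH_MMA_SM70_SUPPORTED)   // placeholder guard macro
    // A single mma.sync.aligned.m8n8k4.col.row.f32.f16.f16.f32 instruction with the
    // 8 D outputs, 2+2 packed A/B inputs, and 8 C inputs bound to registers.
    asm volatile("/* PTX elided in this sketch */" : : );
#else
    assert(0 && "SM70 MMA instruction is not available in this compilation");
#endif
  }
};
```

The `MMA_Traits` specialization shown above then layers the logical shape and thread/value layouts on top of this raw register interface.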
## Volta This and the following sections show examples of how to construct MMA atoms. We don't try to explain this for all GPU architectures and MMAs. Instead, we use selected examples to illustrate the process of developing new atoms. Volta architecture implements an HMMA instruction where a group of 8 threads called a quadpair (QP) collaborate to share data and perform an 8x8x4 (fp32 or fp16) matrix multiply-accumulate. (since a warp is 32 threads wide, it would perform an MMA across 4 QPs for a tile size of 16x16x4). We first take a look at how we would take the ISA semantics of thread and data partitioning for the HMMA instruction, and encode it in a Traits struct. The HMMA NT instruction has the thread-data layout: <p align="center"> <img src="../../images/cute/HMMA.8x8x4.NT.png" alt="HMMA.8x8x4.NT.png" height="400"/> </p> ### Types The HMMA NT above uses types: ```cpp using ValTypeD = float; using ValTypeA = half_t; using ValTypeB = half_t; using ValTypeC = float; ``` The rest of the `MMA_Traits` will be described in units of these types. ### Shape The HMMA NT above has shape 8x8x4: ```cpp // Logical shape of the MMA using Shape_MNK = Shape <_8,_8,_4>; ``` ### Thread ID If the 32 threads in a warp are logically indexed by [0 ... 31], then the above image contains threads [0,1,2,3]U[16,17,18,19]. These threads make up the 0th quadpair. We can write a thread mapping that maps eight logical thread ids [0,1,2,3,4,5,6,7] of the MMA to a quadpair thread index [0,1,2,3]U[16,17,18,19] of a warp. The layout function has 4 elements with a stride of 1 and 2 of those with a stride of 16. With this, we write a layout that represents a quadpair: ```cpp // Mapping from (logical thread id) -> (thread idx) using ThrID = Layout<Shape <_4, _2>, Stride<_1,_16>>; ``` Again, this layout function maps the logical thread id [0,8) of the MMA operation onto the quadpair thread index [0,4)U[16,20) of a warp. ### Accumulator Mapping Let us look at exactly how the 8 threads within a QP are mapped to the A, B and C matrices. For the C and D matrices, the above image is broken down a bit more below. On the left is shown the whole QP level view, and on the right is shown the values owned by just thread 0. <p align="center"> <img src="../../images/cute/HMMA.8x8x4.quadpair.C.png" alt="HMMA.8x8x4.quadpair.C.png" height="400"/> </p> The metainformation of this single instruction level view is what we want to encode in CuTe. Specifically, the QP level view in this diagram corresponds to the four MMA traits for [SM70_F32F16F16F32](../../../include/cute/arch/mma_sm70.hpp). These structs contain the `Element` types, the `Shape_MNK`, and the `ThrID` mapping we constructed above. Now, let us take a look at the definition of `CLayout`, the thread-data layout of accumulators. The job of `CLayout` is to construct a mapping between the `(logical_thr_id, logical_val_id)` and `(m, n)` coordinate in the C matrix which can then be used to build up more complicated layouts and operations like the 16x16x4 WMMA. We can start constructing a `CLayout` from the picture above. As with any CuTe layout, it is a pair of `Shape` and corresponding `Stride`. Let us just look at the shape for now. We know that the HMMA uses 8 threads each of which own 8 values. Therefore, the shape of our mapping must have a size of 8 along two modes. With this, we have ```cpp // (T8,V8) -> (m,n) using CLayout = Layout<Shape <_8, _8>, Stride<_?, _?>; // Stride to be filled in below ``` This is not to be confused with the logical 8x8 shape of the C matrix. 
This is 8-threads by 8-values. We now want to map those to (m,n) coordinates. Since CuTe layouts return indices rather than coordinates, we choose a column-major encoding of the (m,n) coordinates: ``` (logical_thr_id, logical_val_id) -> (m, n) == m + n * M ``` With this in place, we can start thinking about how to construct the strides in `CLayout`. Let's begin by looking at the strides between threads. Note that * `(T0,V0)` is located at `(m,n) = (0,0) = 0` * `(T1,V0)` is located at `(m,n) = (1,0) = 1` * `(T2,V0)` is located at `(m,n) = (0,2) = 16` * `(T3,V0)` is located at `(m,n) = (1,2) = 17` * `(T4,V0)` is located at `(m,n) = (4,0) = 4` * `(T5,V0)` is located at `(m,n) = (5,0) = 5` * `(T6,V0)` is located at `(m,n) = (4,2) = 20` * `(T7,V0)` is located at `(m,n) = (5,2) = 21` where `T4`,`T5`,`T6`,`T7` are the 4th,5th,6th,7th logical thread id of the MMA corresponding to thread indices of 16,17,18,19 of the warp (recorded in the `ThrID` mapping!). We note that the pattern can be transcribed to a layout. We can find the position of the 8 threads via ```cpp using CLayout = Layout<Shape <Shape <_2, _2, _2>, _8>, Stride<Stride<_1, _16, _4>, _?>; ``` With the exact same approach, we can construct the stride along the `logical value id` mode. * `(T0,V0)` is located at `(m,n) = (0,0) = 0` * `(T0,V1)` is located at `(m,n) = (0,1) = 8` * `(T0,V2)` is located at `(m,n) = (2,0) = 2` * `(T0,V3)` is located at `(m,n) = (2,1) = 10` * `(T0,V4)` is located at `(m,n) = (0,4) = 32` * `(T0,V5)` is located at `(m,n) = (0,5) = 40` * `(T0,V6)` is located at `(m,n) = (2,4) = 34` * `(T0,V7)` is located at `(m,n) = (2,5) = 42` We note that this pattern can also be transcribed to a layout. We can find the position of the 8 values via ```cpp // (T8,V8) -> (m,n) using CLayout = Layout<Shape <Shape <_2, _2,_2>, Shape <_2,_2, _2>>, Stride<Stride<_1,_16,_4>, Stride<_8,_2,_32>>>; ``` And that's all! We can verify that each `(tid,vid)` coordinate in this layout is reliably mapped to the correct (encoded) `(m,n)` coordinate. In the case of F16 accumulators, the layout is way less complex. Each row of accumulators `(m, :)` is held by a single thread, which makes the layout: ```cpp using CLayout = Layout<Shape <_8,_8>, Stride<_1,_8>>; ``` ### A and B Layout Mapping A and B matrix layouts depend on whether the sources are transposed or not. The diagram below shows the thread ID to data ownership map for A and B matrices in the case of NT and TN transposes. <p align="center"> <img src="../../images/cute/HMMA.8x8x4.quadpair.AB.png" alt="HMMA.8x8x4.quadpair.AB.png" height="400"/> </p> Let's look at the TN layout for A matrix first (right side in the diagram). Again, there are the same 8 logical threads, but each threads owns only 4 elements this time. The shape of `ALayout` will then be `Shape<_8, _4>`. As for the strides, we again need a similar mapping between `(m, k) == m + k * M`. Looking down the `M` mode, we go from `(T0, V0)` to `(T1, V0)` which is a stride of 1 for all 8 threads. For the `K` mode, as we go across, we go from `(T0, V0)` to `(T0, V1)`, which makes a stride of 8 for all 4 values. Therefore, the A layout is: ```cpp // (T8,V4) -> (m,k) using ALayout = Layout<Shape <_8,_4>, Stride<_1,_8>>; ``` Source B layout is constructed similarly for the TN HMMA, except that we want write it as `(N,K)` rather than `(K,N)` for convenience. For the strides, as we go across the `N` mode, we go from `(T0, V0)` to `(T1, V0)`, making this a stride of 1 for all 8 threads. 
As we go down the `K` mode, `(T0, V0)` to `(T0, V1)` which is a stride of 8 for all 4 values. So the B layout is the same as A: ```cpp // (T8,V4) -> (n,k) using BLayout = Layout<Shape <_8,_4>, Stride<_1,_8>>; ``` The layouts in the case of NT are a bit more complicated (left side of the diagram). Going down the `M` mode of `A`, we see the four values of `T0` first and then we see the four values of `T4`. This means we first have a stride of 1 for 4 values, followed by a stride of 4 from `T0` to `T4`. So we have two sub-strides along the `M` mode. For the `K` mode, as we go across, we simply increment the `thr_id`, keeping `val_id` the same, making the stride 8 for 4 threads. This makes the A layout: ```cpp // (T8,V4) -> (m,k) using ALayout = Layout<Shape <Shape <_4,_2>,_4>, Stride<Stride<_8,_4>,_1>>; ``` With the `(N,K)` ordering for B, the layout is the same. ```cpp // (T8,V4) -> (n,k) using BLayout = Layout<Shape <Shape <_4,_2>,_4>, Stride<Stride<_8,_4>,_1>>; ``` For the NN and TT transposes, they are simply combinations of the two layouts we have seen for A and B so far. ## Hopper Now, we are ready to take a look at the much larger GMMA operation (Group MMA) first introduced with Hopper architecture. These MMA instructions operate at the granularity of 128 threads (4 warps), which are collectively referred to as a warpgroup. ### Thread ID In the case of Hopper GMMAs, the thread IDs are assigned based on the simple 1D contiguous layout, which makes `thrID` trivial: ```cpp using ThrID = Layout<_128, _1>; ``` ### Accumulator Mapping Accumulators are mapped hierarchically in GMMA, starting from the concept of a core matrix and building up to a layout for the whole C matrix tile. Let's look at this core matrix first. We only consider fp16 accumulators here, but extensions of fp32 accumulators as trivial as we will see later. Each core matrix has the layout as shown in the diagram below. <p align="center"> <img src="../../images/cute/gmma_coremat_cd_fp16.png" alt="gmma_coremat_cd_fp16.png" height="600"/> </p> As in the Volta examples, the thread IDs are logical only, and which of the four warps they belong to in the warpgroup is not important. Then GMMA tiles this core matrix first vertically along the M mode, and then repeats that column of core matrices along the N mode to construct the full MxN tile. This tiling is shown in the image below. <p align="center"> <img src="../../images/cute/gmma_wg_n_slice.png" alt="gmma_wg_n_slice.png" height="600"/> </p> With this image, we are again ready to start building the `CLayout` for `SM90_64x128x16_F16F16F16F16_TN` atom. Same as before, we are constructing a mapping between the `(logical_thr_id, logical_val_id) -> (m, n)` coordinate spaces. To begin, let's follow the first few threads and values. We immediately see that they are arranged along the `N`-mode with pairs of values and four threads. This gives us ```cpp // (T128,V4) -> (M64,N8) using CLayout = Layout<Shape <Shape < _4, ...>, Shape < _2, ...>>, Stride<Stride<_128, ...>, Stride<_64, ...>>>; ``` To complete the first 8x8 core matrix, the four threads repeat eight times down the `M`-mode: ```cpp // (T128,V4) -> (M64,N8) using CLayout = Layout<Shape <Shape < _4, _8, ...>, Shape < _2, ...>>, Stride<Stride<_128, _1, ...>, Stride<_64, ...>>>; ``` Then, as we go to the next core matrix, we wrap back again to `T0`, but this time to `(T0, V2)`. 
```cpp
// (T128,V4) -> (M64,N8)
using CLayout = Layout<Shape <Shape < _4, _8, ...>, Shape < _2, _2>>,
                       Stride<Stride<_128, _1, ...>, Stride<_64, _8>>>;
```

Finally, we get this entire pattern repeating four times, once for each warp, down the `M`-mode starting at `(m,n) = (16,0) = 16`, where two core matrices that belong to the same warp are stacked on top of each other. This makes the size of the final sub-mode of M 4. As for the stride, this time we go to `(T32, V0)`, which makes it a stride of 32.

```cpp
// (T128,V4) -> (M64,N8)
using CLayout = Layout<Shape <Shape < _4, _8, _4>, Shape < _2, _2>>,
                       Stride<Stride<_128, _1, _16>, Stride<_64, _8>>>;
```

This is the full `CLayout` for 64x8 accumulators. The GMMA instructions include 64xN variants with `N = [16,32,64,128,256]` where this 64x8 pattern is repeated, giving each thread additional values. As this starts at `(m,n) = (0,8) = 512`, this is easy to account for in our `CLayout`. For example, the 64x128 `CLayout` is

```cpp
// (T128,V64) -> (M64,N128)
using CLayout = Layout<Shape <Shape < _4, _8, _4>, Shape < _2, _2, _16>>,
                       Stride<Stride<_128, _1, _16>, Stride<_64, _8, _512>>>;
```

where we see 16 copies of the 64x8 tile.

### A and B Layout Mapping

GMMA atoms that consume A and B sources directly from shared memory are a bit interesting. The GMMA Descriptor is constructed on an entire tile of A and/or B data in shared memory rather than being partitioned by threads. That is, every thread sees the entire tile of data and the tile is not reordered so that the descriptor can be constructed on it. In `ALayout` form, this can be expressed

```cpp
// (T128,V64x8) -> (M64,K16)
using ALayout = Layout<Shape <_128, Shape <_64,_16>>,
                       Stride<  _0, Stride< _1,_64>>>;
```

That is, all threads are mapped to the `(m,k) = (0,0) = 0` element and the values (and the shape of the values) remain unchanged. The GMMA Descriptor Constructor can then inspect the `(M,K)` layout of this data and create an appropriate GMMA Descriptor or produce an error message saying the data is in an invalid layout for GMMA.

## `TiledMMA`s

We can make more complex patterns by combining and interleaving multiple atoms. Let's start with `SM70_8x8x4_F32F16F16F32_NT`.

```cpp
MMA_Atom mma = MMA_Atom<SM70_8x8x4_F32F16F16F32_NT>{};

print_latex(mma);
```

<p align="center">
  <img src="../../images/cute/HMMA.8x8x4.NT_Atom.png" alt="HMMA.8x8x4.NT_Atom.png" height="400"/>
</p>

The above is equivalent to

```cpp
TiledMMA mma = make_tiled_mma(SM70_8x8x4_F32F16F16F32_NT{},
                              Layout<Shape<_1,_1,_1>>{},  // Layout of Atoms
                              Tile<_8,_8,_4>{});          // Tiler

print_latex(mma);
```

as it is a single atom and has a natural tile size of 8x8x4. We can create an object akin to a WMMA by using four of these quadpair MMAs:

```cpp
TiledMMA mma = make_tiled_mma(SM70_8x8x4_F32F16F16F32_NT{},
                              Layout<Shape <_2,_2>,
                                     Stride<_2,_1>>{});  // 2x2 n-major layout of Atoms

print_latex(mma);
```

<p align="center">
  <img src="../../images/cute/HMMA.8x8x4.NT_2x2.png" alt="HMMA.8x8x4.NT_2x2.png" height="400"/>
</p>

This `TiledMMA` replicates the `MMA_Atom` across threads, as we can see from the `T4`, `T8`, and `T12` threads in the `C`-matrix that were not used before. Each quadrant of the `C`-matrix is a replica of the atom's partitioning pattern for a new quadpair, and this replication follows a `(2,2):(2,1)` layout.
The above represents a 16x16x4 MMA now, but we can immediately expand this "tile size" up to 32x32x4 instead: ```cpp TiledMMA mma = make_tiled_mma(SM70_8x8x4_F32F16F16F32_NT{}, Layout<Shape <_2,_2>, Stride<_2,_1>>{}, // 2x2 n-major layout of Atoms Tile<_32,_32,_4>{}); // 32x32x4 tiler print_latex(mma); ``` <p align="center"> <img src="../../images/cute/HMMA.8x8x4.NT_2x2_32x32x4.png" alt="HMMA.8x8x4.NT_2x2_32x32x4.png" height="400"/> </p> This `TiledMMA` replicates the previous `TiledMMA` across values instead of threads. We can see the `T0V8` and `T16V8` and `T8V8` values in the `C`-matrix that were not used before. Each quadrant of the `C`-matrix is a replica of the previous `TiledMMA`'s partitioning pattern for a new set of values. Continuing, we see that there are eight values that `T0` receives from the `A`-matrix. Those reads occur at coordinates ``` T0V0 => ( 0,0) T0V1 => ( 1,0) T0V2 => ( 2,0) T0V3 => ( 3,0) T0V4 => (16,0) T0V5 => (17,0) T0V6 => (18,0) T0V7 => (19,0) ``` which are separate, but we might prefer them to be next to each other. That is we would like to permute the `M`-mode to create another valid `TiledMMA`. ```cpp TiledMMA mma = make_tiled_mma(SM70_8x8x4_F32F16F16F32_NT{}, Layout<Shape <_2,_2>, Stride<_2,_1>>{}, // 2x2 n-major layout of Atoms Tile<Layout<Shape <_4,_4,_2>, Stride<_1,_8,_4>>, // Permutation on M, size 32 _32, // Permutation on N, size 32 identity _4>{}); // Permutation on K, size 4 identity print_latex(mma); ``` <p align="center"> <img src="../../images/cute/HMMA.8x8x4.NT_2x2_32Mx32x4.png" alt="HMMA.8x8x4.NT_2x2_32Mx32x4.png" height="400"/> </p> That layout `(4,4,2):(1,8,4)` is read like a scatter permutation, telling the m-coords of the original image where to go in the new image. ``` old m-coord: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 new m-coord: 0 1 2 3 8 9 10 11 16 17 18 19 24 25 26 27 4 5 6 7 12 13 14 15 20 21 22 23 28 29 30 31 ``` This permutes only the M-mode (in `A` and `C` accordingly) and brings the access of all threads to be contiguous in m-coordinates in the `A`-matrix. This is convenient when designing layouts for shared memory or registers, for example. The MMA instructions contained within the image above are now effectively interleaved in the logical m-coordinates. Of course, permutations in the N-mode and K-mode are also valid. To see how these `TiledMMA`s are used to partition data tensors, see the [`0x_gemm_tutorial.md`](./0x_gemm_tutorial.md).
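As a brief preview of that partitioning step, the following sketch shows the typical calling pattern. It assumes `gA`, `gB`, and `gC` are already-constructed `cute::Tensor` views of MxK, NxK, and MxN data tiles (for example, tiles of global or shared memory); those tensor names, and the use of `threadIdx.x` as the thread index, are illustrative assumptions rather than definitions from this document.

```c++
TiledMMA mma = make_tiled_mma(SM70_8x8x4_F32F16F16F32_NT{},
                              Layout<Shape <_2,_2>,
                                     Stride<_2,_1>>{});   // the 2x2 arrangement from above

// One slice of the partitioning pattern per thread; size(mma) threads participate.
auto thr_mma = mma.get_thread_slice(threadIdx.x);

Tensor tCgC = thr_mma.partition_C(gC);             // this thread's view of the C tile
Tensor tCrA = thr_mma.partition_fragment_A(gA);    // register fragment for A values
Tensor tCrB = thr_mma.partition_fragment_B(gB);    // register fragment for B values
Tensor tCrC = thr_mma.partition_fragment_C(gC);    // register fragment for accumulators

// ... fill tCrA/tCrB (e.g., with cute::copy) ...
gemm(mma, tCrA, tCrB, tCrC);                       // invokes the Atom's fma for each sub-MMA
copy(tCrC, tCgC);                                  // write accumulators back out
```

The full walkthrough of this pattern, including how the K-mode is iterated, is in the linked tutorial.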
media/docs/cute/0t_mma_atom.md/0
{ "file_path": "media/docs/cute/0t_mma_atom.md", "repo_id": "media", "token_count": 9068 }
43
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Profiler") [README](../../README.md#documentation) > **CUTLASS Profiler** # CUTLASS Profiler The CUTLASS Profiler is a command-line driven test and profiling environment for CUTLASS computations defined in the CUTLASS Instance Library. The CUTLASS Profiler is capable of executing each GEMM, Sparse Gemm, Conv2d, and Conv3d kernel. The CUTLASS Profiler may be compiled with: ```bash $ make cutlass_profiler -j ``` To limit compilation time, only one tile size (typically 128x128) and threadblock cluster size (typically 2x1x1) is instantiated for each data type, math instruction, and layout. To instantiate all sizes, set the following environment variable when running CMake from an empty `build/` directory. ```bash $ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" -DCUTLASS_LIBRARY_KERNELS=all -DCUTLASS_UNITY_BUILD_ENABLED=ON ... $ make cutlass_profiler -j ``` Enabling the unity build places multiple kernel instances in one compilation unit, thereby reducing size of the compiled binary and avoiding linker limitations on some platforms. The CUTLASS Profiler sources are stored in ```bash tools/ profiler/ ``` The CUTLASS Profiler usage statement may be obtained by executing `cutlass_profiler --help` and appears as follows. ```bash CUTLASS Performance Tool usage: cutlass_profiler [options] --help --mode=<string> Cutlass profiler execution mode. --mode=profile regular verification and profiling (default) --mode=dry_run no kernels are launched or workspaces allocated --mode=enumerate lists all operation kind and operations --mode=trace executes a single device-side computation with no other kernel launches --device-info Prints information on all GPUs present in the system --operation=<operation_kind> CUTLASS operation to profile. --kernels=<string_list> Filter operations by kernel names. For example, call all kernels with ("s1688" and "nt") or ("s844" and "tn" and "align8") in their operation name using --kernels="s1688*nt, s884*tn*align8" --ignore-kernels=<string_list> Excludes kernels whose names match anything in this list. Device: --device=<int> CUDA Device ID --compute-capability=<int> Override the compute capability. --llc-capacity=<capacity in KiB> Capacity of last-level cache in kilobytes. If this is non-zero, profiling phases cycle through different input tensors to induce capacity misses in the L2. Initialization: --initialization=<bool> Enables initialization (default: true). If false, device memory is not initialized after allocation. --initialization-provider=<provider> Selects initialization provider {host, device*}. (default: '*') --dist=<distribution> Data distribution of input tensors {uniform*, gaussian, identity, sequential} --dist=uniform,min:<double>,max:<double>,scale:<integer> --dist=gaussian,mean:<double>,stddev:<double>,scale:<integer> --dist=sequential,start:<double>,delta:<double>,scale:<integer> --dist=identity --seed=<int> Random number generator seed. Used to enforce deterministic initialization. Library: --library-algo-mode=<mode> Indicates algorithm mode used to call libraries such as cuBLAS and cuDNN. mode={default*,matching,best} --library-algos=<range-list> If --algorithm-mode=best, permits specifying a selection of algorithms. Profiling: --workspace-count=<workspace count> Number of discrete workspaces maintained to avoid cache-resident If zero (default), the amount is chosen for each workload based on capacity of the last-level cache. 
--profiling-iterations=<iterations> Number of iterations to profile each kernel. If zero, kernels are launched up to the profiling duration. --warmup-iterations=<iterations> Number of iterations to execute each kernel prior to profiling. --sleep-duration=<duration> Number of ms to sleep between profiling periods (ms). --profiling-enabled=<bool> If true, profiling is actually conducted. Verification: --verification-enabled=<bool> Whether to perform verification checks. --epsilon=<error> Error threshold. Setting to zero (default) requires bit-level equivalence. --nonzero-floor=<floor> Results whose absolute value is less than this quantity are treated as zero for comparisons. --save-workspace=<string> Specifies when to save the GEMM inputs and results to the filesystem. --save-workspace=never never save workspace (default) --save-workspace=incorrect save workspace for incorrect results --save-workspace=always always save workspace --verification-providers=<providers> List of providers used to verify result. (default: '*') Gemm verification-providers {cublas*} Conv2d verification-providers {cudnn*, device*, host} Report: --append=<bool> If true, result is appended to possibly existing file. Otherwise, any existing file is overwritten. --output=<path> Path to output file for machine readable results. Operation kind and '.csv' is appended. --junit-output=<path> Path to junit output file for result reporting. Operation kind and '.junit.xml' is appended. --report-not-run=<bool> If true, reports the status of all kernels including those that do not satisfy the given arguments. --tags=<column:tag,...> Inserts leading columns in output table and uniform values for each column. Useful for generating pivot tables. --verbose=<bool> Prints human-readable text to stdout. If false, nothing is written to stdout. About: --version CUTLASS 2.4.0 built on Nov 19 2020 at 11:59:00 Operations: gemm General matrix-matrix product. D = alpha * A*B + beta * C spgemm Structured sparse GEMM. D = alpha * A*B + beta * C conv2d Conv2d operation. Output(Tensor4D) = alpha * Input(Tensor4D) * Filter(Tensor4D) + beta * Input(Tensor4D) conv3d Conv3d operation. Output(Tensor5D) = alpha * Input(Tensor5D) * Filter(Tensor5D) + beta * Input(Tensor5D) For details about a particular function, specify the function name with --help. Example: $ cutlass_profiler --operation=Gemm --help $ cutlass_profiler --operation=Conv3d --help $ cutlass_profiler --operation=Conv2d --help ``` # GEMM The CUTLASS Profiler is capable of executing GEMM and Sparse GEMM problems. The CUTLASS Profiler can be built with cuBLAS enabled to use as a reference implementation. If CMake detects the cuBLAS library available in the system, it is included as a dependency. This may be explicitly overridden with CMake flag `CUTLASS_ENABLE_CUBLAS`. ## GEMM Arguments The complete set of arguments available to each operation may be viewed by specifying the operation name in addition to `--help`. The argument flags and their aliases usable for GEMM appear as follows. ```bash $ ./tools/profiler/cutlass_profiler --operation=gemm --help GEMM [enum] --gemm_kind Variant of GEMM (e.g. 
universal, gemm, planar_complex, planar_complex_array) [int] --m,--problem-size::m M dimension of the GEMM problem space [int] --n,--problem-size::n N dimension of the GEMM problem space [int] --k,--problem-size::k K dimension of the GEMM problem space [tensor] --A Tensor storing the A operand [tensor] --B Tensor storing the B operand [tensor] --C Tensor storing the C operand [scalar] --alpha,--epilogue::alpha Epilogue scalar alpha [scalar] --beta,--epilogue::beta Epilogue scalar beta [enum] --split_k_mode,--split-k-mode Variant of split K mode(serial, parallel) [int] --split_k_slices,--split-k-slices Number of partitions of K dimension [int] --batch_count,--batch-count Number of GEMMs computed in one batch [enum] --op_class,--opcode-class Class of math instruction (simt, tensorop, wmmatensorop, wmma). [enum] --accum,--accumulator-type Math instruction accumulator data type [int] --cta_m,--threadblock-shape::m Threadblock shape in the M dimension [int] --cta_n,--threadblock-shape::n Threadblock shape in the N dimension [int] --cta_k,--threadblock-shape::k Threadblock shape in the K dimension [int] --cluster_m,--cluster-shape::m Cluster shape in the M dimension [int] --cluster_n,--cluster-shape::n Cluster shape in the N dimension [int] --cluster_k,--cluster-shape::k Cluster shape in the K dimension [int] --stages,--threadblock-stages Number of stages of threadblock-scoped matrix multiply [int] --warps_m,--warp-count::m Number of warps within threadblock along the M dimension [int] --warps_n,--warp-count::n Number of warps within threadblock along the N dimension [int] --warps_k,--warp-count::k Number of warps within threadblock along the K dimension [int] --inst_m,--instruction-shape::m Math instruction shape in the M dimension [int] --inst_n,--instruction-shape::n Math instruction shape in the N dimension [int] --inst_k,--instruction-shape::k Math instruction shape in the K dimension [int] --min_cc,--minimum-compute-capability Minimum device compute capability [int] --max_cc,--maximum-compute-capability Maximum device compute capability Examples: Profile a particular problem size: $ cutlass_profiler --operation=Gemm --m=1024 --n=1024 --k=128 Schmoo over problem size and beta: $ cutlass_profiler --operation=Gemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5 Schmoo over accumulator types: $ cutlass_profiler --operation=Gemm --accumulator-type=f16,f32 Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. 
For row major use, row or t): $ cutlass_profiler --operation=Gemm --A=f16:column --B=*:row Using various input value distribution: $ cutlass_profiler --operation=Gemm --dist=uniform,min:0,max:3 $ cutlass_profiler --operation=Gemm --dist=gaussian,mean:0,stddev:3 $ cutlass_profiler --operation=Gemm --dist=sequential,start:0,delta:1 Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size): $ cutlass_profiler --operation=Gemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv: $ cutlass_profiler --operation=Gemm \ --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \ --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \ --k=8,16,32,64,128,256,288,384,504,512,520 \ --beta=0,1,2 --profiling-iterations=1 \ --providers=cutlass --output=functional-test.csv ``` The format of tensor argument is followed by `<type>:<layout>`. The type could be `f32` as 32-bit floating point, `s8` as 8-bit signed integer, etc. The available types can be referred to the `NumericTypeID_enumerants` in [util.cu](tools/library/src/util.cu). The layout could be `row` or `column`. ## Example CUDA Core GEMM Operation Example command line for profiling SGEMM kernels is as follows: ```bash $ ./tools/profiler/cutlass_profiler --kernels=sgemm --m=3456 --n=4096 --k=4096 ============================= Problem ID: 1 Provider: CUTLASS OperationKind: gemm Operation: cutlass_simt_sgemm_128x128_8x2_nn_align1 Status: Success Verification: ON Disposition: Passed cuBLAS: Passed Arguments: --m=3456 --n=4096 --k=4096 --A=f32:column --B=f32:column --C=f32:column --alpha=1 --beta=0 --split_k_slices=1 \ --batch_count=1 --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \ --warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024 Bytes: 180355072 bytes FLOPs: 115992428544 flops Runtime: 6.73655 ms Memory: 24.934 GiB/s Math: 17218.4 GFLOP/s ``` Note, the arguments which appear in the output may be used as command line parameters for subsequent invocations. ## Example Tensor Core GEMM Operations To execute kernels targeting Tensor Core operations, supply the flag `--op_class=tensorop` in the command line. ```bash $ ./tools/profiler/cutlass_profiler --op_class=tensorop --m=3456 --n=4096 --k=8192 ============================= Problem ID: 1 Provider: CUTLASS OperationKind: gemm Operation: cutlass_tensorop_s16816gemm_f16_256x128_32x3_nn_align8 Status: Success Verification: ON Disposition: Passed cuBLAS: Passed Arguments: --m=3456 --n=4096 --k=8192 --A=f16:column --B=f16:column --C=f32:column --alpha=1 --beta=0 --split_k_slices=1 \ --batch_count=1 --op_class=tensorop --accum=f32 --cta_m=256 --cta_n=128 --cta_k=32 --stages=3 --warps_m=4 \ --warps_n=2 --warps_k=1 --inst_m=16 --inst_n=8 --inst_k=16 --min_cc=80 --max_cc=1024 Bytes: 180355072 bytes FLOPs: 231956545536 flops Runtime: 0.98647 ms Memory: 170.272 GiB/s Math: 235138 GFLOP/s ``` ## Covering the problem space All arguments may have single values or comma-delimited set of values. Integers may also be specified as an inclusive range with the following syntax `start:end:increment` or simply `start:end`. For example, the following sweeps over the range of the GEMM K dimension from 8 to 4096 in increments of 8 elements. 
```bash
$ ./tools/profiler/cutlass_profiler --kernels=cutlass_simt_sgemm_128x128_nn --m=4352 --n=4096 --k=8:4096:8
```

## Output

By default, runtime and computed GFLOP/s are reported for each operation and problem size. Additionally, a table of comma separated values is reported at the end of the execution. This may be output to a file with the `--output=<filename.csv>` command line option as shown:

```bash
$ ./tools/profiler/cutlass_profiler --kernels=cutlass_simt_sgemm_128x128_nn \
  --m=3456 --n=4096 --k=8:4096:8 --output=report.csv
```

To facilitate generation of pivot tables and charts, additional columns may be prepended with the `--tags=<column>:<value>` option. One or more tags may be specified using a comma-delimited list.

```bash
$ ./tools/profiler/cutlass_profiler --kernels=cutlass_simt_sgemm_128x128_nn \
  --m=3456 --n=4096 --k=8:4096:8 --output=report.csv \
  --tags=cutlass:2.2,date:2020-06-08
```

## CUTLASS 3.0 GEMM procedural names

CUTLASS 3.0 introduces a new naming convention for GEMMs used by the profiler targeting the NVIDIA Hopper architecture and beyond so as to indicate new features of the kernel within the name (e.g., the cluster shape).

To best illustrate this naming convention, we will walk through the meaning of each of the components in a GEMM kernel used by the profiler:

```
cutlass3x_sm90_tensorop_s64x128x16gemm_f16_f16_f32_f16_f32_128x128x64_2x1x1_0_ntn_align8
```

The components within this name are as follows:

* `cutlass3x`: indicates that the kernel was generated through the CUTLASS 3.0 API
* `sm90`: indicates that the kernel targets NVIDIA GPUs with compute capability 90
* `tensorop`: indicates that the kernel makes use of NVIDIA Tensor Cores
(as opposed to `simt`, which indicates the use of "CUDA cores")
* `s`: indicates that the Tensor Core instruction being used accumulates in single precision
(as opposed to `h`, which indicates half precision)
* `64x128x16gemm`: indicates that the shape of the Tensor Core instruction being used (MxNxK) is 64x128x16
* `f16_f16_f32_f16_f32`: indicates the data types of operands A, B, Accumulator, C, and D (in that order)
* `128x128x64`: indicates that the thread block shape used in the GEMM (MxNxK) is 128x128x64
* `2x1x1`: indicates that the cluster shape being used is 2x1x1
* `0`: indicates that the kernel uses the CollectiveBuilder's automatic stage calculation to determine the
number of pipeline stages in the kernel. Note that `0` does not mean that no stages are used. A nonzero value
indicates that automatic stage calculation is not performed and indicates the number of pipeline stages to be used.
This 0 is only added to the kernel's procedural name; the profiler will still report the actual stage count
when printing the kernel argument details (`--stages=N`), and kernel discovery will still support filtering
through the `--stages` argument.
* `ntn`: indicates that the layouts for operands A, B, and C are column major ("n"; non-transposed),
row major ("t"; transposed), and column major, respectively.
* `align8`: indicates that the maximum alignment between operands A and B is 8.

Note that in some special cases where the input A/B types do not match those of the MMA instruction, the MMA-facing input type is added to the instruction string as well.
``` cutlass3x_sm90_tensorop_s64x128x8tf32gemm_f32_f32_f32_f32_f32_128x128x32_2x1x1_0_tnn_align4 ``` * `s64x128x8tf32gemm`: indicates that the MMA consumes inputs in `tf32` format, and therefore the kernel performs rounding of the `f32` values in global memory while loading them into shared memory. For custom mainloop or epilogue schedules, details of the opted-in schedule are appended to the end of the kernel name. For example, ``` cutlass3x_sm90_tensorop_h64x128x16gemm_f16_f16_f16_void_f16_128x128x64_1x1x1_0_nnn_align8_warpspecialized_cooperative_epi_tma ``` * `warpspecialized_cooperative`: Mainloop employs a persistent warp-specialized mainloop and kernel schedule. * `epi_tma`: Kernel epilogue employs TMA based vectorization. * `f16_f16_f16_void_f16`: In this case, C type is set to `void`, indicating that residual matrix support is disabled. # Convolution The CUTLASS Profiler is capable of executing 2-D and 3-D convolution problems for forwards and backwards operator variants. The CUTLASS Profiler can be built with cuDNN enabled to use as a reference implementation. If CMake detects the cuDNN library available in the system, it is included as a dependency. This may be explicitly overridden with CMake flag `CUTLASS_ENABLE_CUDNN`. ```bash $ cmake .. -DCUTLASS_LIBRARY_OPERATIONS=conv2d -DCUTLASS_ENABLE_CUDNN=OFF ... $ make -j16 cutlass_profiler ``` ## Convolution Arguments ```bash $ ./tools/profiler/cutlass_profiler --help --operation=Conv2d Conv2d [enum] --conv_kind Convolutional operator (fprop, dgrad, wgrad) [int] --n,--input_n Input N dimension of the Conv2d problem space [int] --h,--input_h Input H dimension of the Conv2d problem space [int] --w,--input_w Input W dimension of the Conv2d problem space [int] --c,--input_c Input C dimension of the Conv2d problem space [int] --k,--filter_k Filter K dimension of the Conv2d problem space [int] --r,--filter_r Filter R dimension of the Conv2d problem space [int] --s,--filter_s Filter S dimension of the Conv2d problem space [int] --p,--output_p Output P dimension of the Conv2d problem space [int] --q,--output_q Output Q dimension of the Conv2d problem space [int] --g,--groups Number of convolution groups [int] --pad_h Padding in H direction [int] --pad_w Padding in W direction [int] --stride_h Stride in H direction [int] --stride_w Stride in W direction [int] --dilation_h Dilation in H direction [int] --dilation_w Dilation in W direction [tensor] --Activation Tensor storing the Activation operand [tensor] --Filter Tensor storing the Filter operand [tensor] --Output Tensor storing the Output operand [enum] --conv_mode Convolution filter mode (conv, cross) [enum] --iterator_algorithm,--iterator_algo Convolution iterator algorithm (analytic, optimized) [scalar] --alpha,--epilogue::alpha Epilogue scalar alpha [scalar] --beta,--epilogue::beta Epilogue scalar beta [enum] --split_k_mode,--split-k-mode SplitK mode for serial or parallel reduction (serial, parallel) [int] --split_k_slices,--split-k-slices Number of partitions of K dimension [enum] --eq_gemm_provider,--eq-gemm-provider Enable profiling equivalent gemm by the following providers (cutlass) [enum] --op_class,--opcode-class Class of math instruction (simt, tensorop, wmmatensorop, wmma) [enum] --accum,--accumulator-type Math instruction accumulator data type [int] --cta_m,--threadblock-shape::m Threadblock shape in the M dimension [int] --cta_n,--threadblock-shape::n Threadblock shape in the N dimension [int] --cta_k,--threadblock-shape::k Threadblock shape in the K dimension [int] 
--cluster_m,--cluster-shape::m Cluster shape in the M dimension [int] --cluster_n,--cluster-shape::n Cluster shape in the N dimension [int] --cluster_k,--cluster-shape::k Cluster shape in the K dimension [int] --stages,--threadblock-stages Number of stages of threadblock-scoped matrix multiply [int] --warps_m,--warp-count::m Number of warps within threadblock along the M dimension [int] --warps_n,--warp-count::n Number of warps within threadblock along the N dimension [int] --warps_k,--warp-count::k Number of warps within threadblock along the K dimension [int] --inst_m,--instruction-shape::m Math instruction shape in the M dimension [int] --inst_n,--instruction-shape::n Math instruction shape in the N dimension [int] --inst_k,--instruction-shape::k Math instruction shape in the K dimension [int] --min_cc,--minimum-compute-capability Minimum device compute capability [int] --max_cc,--maximum-compute-capability Maximum device compute capability Examples: Profile a particular convolution (specify all the convolution parameters): $ cutlass_profiler --operation=Conv2d --Activation=f16:nhwc --Filter=f16:nhwc --Output=f16 --accumulator-type=f32 --n=32 --h=14 --w=14 --c=8 --k=64 --r=3 --s=3 --pad_h=1 --pad_w=1 --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 ``` ## Example CUDA Core Convolution Operation Example command line for profiling forward propagation convolution kernels on CUDA cores is as follows: ```bash $ ./tools/profiler/cutlass_profiler --kernels=simt_sfprop --verification-providers=device --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 ============================= Problem ID: 1 Provider: CUTLASS OperationKind: conv2d Operation: cutlass_simt_sfprop_optimized_128x128_8x2_nhwc Status: Success Verification: ON Disposition: Passed reference_device: Passed Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \ --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f32:nhwc --Filter=f32:nhwc --Output=f32:nhwc \ --conv_mode=cross --iterator_algorithm=optimized --alpha=1 --beta=0 --split_k_mode=serial --split_k_slices=1 \ --eq_gemm_provider=none --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \ --warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024 Bytes: 2055798784 bytes FLOPs: 118482796544 flops Runtime: 8.13237 ms Memory: 235.431 GiB/s Math: 14569.3 GFLOP/s ``` ## Example Tensor Core Convolution Operation Example command line for profiling forward propagation convolution kernels runing on Tensor Cores is as follows: ```bash $ ./tools/profiler/cutlass_profiler --kernels=tensorop*fprop --verification-providers=device --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 ============================= Problem ID: 1 Provider: CUTLASS OperationKind: conv2d Operation: cutlass_tensorop_s16816fprop_optimized_f16_128x128_64x4_nhwc Status: Success Verification: ON Disposition: Passed reference_device: Passed Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \ --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f16:nhwc --Filter=f16:nhwc --Output=f32:nhwc \ --conv_mode=cross --iterator_algorithm=optimized --alpha=1 --beta=0 --split_k_mode=serial --split_k_slices=1 \ --eq_gemm_provider=none --op_class=tensorop --accum=f32 --cta_m=128 --cta_n=128 --cta_k=64 --stages=4 \ --warps_m=2 --warps_n=2 --warps_k=1 --inst_m=16 --inst_n=8 --inst_k=16 --min_cc=80 --max_cc=1024 Bytes: 
1130659840 bytes FLOPs: 118482796544 flops Runtime: 0.945071 ms Memory: 1114.21 GiB/s Math: 125369 GFLOP/s ``` # Copyright Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: BSD-3-Clause ``` Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
media/docs/profiler.md/0
{ "file_path": "media/docs/profiler.md", "repo_id": "media", "token_count": 13662 }
44
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Load nodes and implementations """ import ctypes from cutlass.backend.c_types import tuple_factory from cutlass.backend.epilogue import dtype2ctype, to_ctype_value from cutlass.backend.evt.ir.node import NodeBase, ImplBase class LoadImplBase(ImplBase): """ Base class for load node implementations """ reserved_names = ["accum", "C"] def __init__(self, node) -> None: super().__init__(node) self.element = node.element self.element_output = node.element_output self.stride = node.tensor.stride class AccumulatorImpl(LoadImplBase): """ Accumulator node implementation """ @staticmethod def match(node, problem_size: tuple): return node.name == "accum" and node.tensor.shape == problem_size class LoadSrcImpl(LoadImplBase): """ Load C implementation """ @property def name_camel(self) -> str: return "TensorC" @property def argument_type_c(self): stride_mnl = self.get_stride_mnl() tuple_type = tuple_factory(stride_mnl, self.stride_dtype) class _Argument(ctypes.Structure): _fields_ = [ ("ptr_C", ctypes.c_void_p), ("stride_C", tuple_type) ] def __init__(self, ptr) -> None: self.ptr_C = ptr self.stride_C = tuple_type(stride_mnl) return _Argument @staticmethod def match(node, problem_size: tuple): return node.name == "C" and node.tensor.shape == problem_size class AuxLoadImpl(LoadImplBase): """ Load arbitrary tensor """ @property def argument_type(self): stride_mnl = self.get_stride_mnl() name = self.name tuple_type = tuple_factory(stride_mnl, self.stride_dtype) element_type = self.element class _Argument(ctypes.Structure): _fields_ = [ ("ptr_aux", ctypes.c_void_p), ("null_default", dtype2ctype[element_type]), ("dAux", tuple_type) ] def __init__(self, kwargs) -> None: ptr = kwargs[name] self.ptr_aux = ptr self.null_default = to_ctype_value(0, 
element_type) self.dAux = tuple_type(stride_mnl) return _Argument @staticmethod def match(node, problem_size: tuple): if node.name in LoadImplBase.reserved_names: return False strideMN = node.tensor.stride[-2:] if (strideMN[0] == 1 and strideMN[1] != 0 or strideMN[0] != 0 and strideMN[1] == 1 ): return True else: return False class RowBroadcastImpl(LoadImplBase): """ Broadcast a row vector """ def __init__(self, node) -> None: super().__init__(node) self.stride_dtype = "int" @property def argument_type(self): stride_mnl = self.get_stride_mnl() name = self.name tuple_type = tuple_factory(stride_mnl, self.stride_dtype) element_type = self.element class _Argument(ctypes.Structure): _fields_ = [ ("ptr_row", ctypes.c_void_p), ("null_default", dtype2ctype[element_type]), ("dRow", tuple_type) ] def __init__(self, kwargs) -> None: ptr = kwargs[name] self.ptr_row = ptr self.null_default = to_ctype_value(0, element_type) self.dRow = tuple_type(stride_mnl) return _Argument @staticmethod def match(node, problem_size: tuple): if node.name in LoadImplBase.reserved_names: return False strideMN = node.tensor.stride[-2:] if strideMN == (0, 1): return True else: return False class ColumnBroadcastImpl(LoadImplBase): """ Broadcast a column vector """ def __init__(self, node) -> None: super().__init__(node) self.stride_dtype = "int" @property def argument_type(self): stride_mnl = self.get_stride_mnl() name = self.name tuple_type = tuple_factory(stride_mnl, self.stride_dtype) element_type = self.element class _Argument(ctypes.Structure): _fields_ = [ ("ptr_col", ctypes.c_void_p), ("null_default", dtype2ctype[element_type]), ("dCol", tuple_type) ] def __init__(self, kwargs) -> None: ptr = kwargs[name] self.ptr_col = int(ptr) self.null_default = to_ctype_value(0, element_type) self.dCol = tuple_type(stride_mnl) return _Argument @staticmethod def match(node, problem_size: tuple): if node.name in LoadImplBase.reserved_names: return False strideMN = node.tensor.stride[-2:] if strideMN == (1, 0): return True else: return False class ScalarBroadcastImpl(LoadImplBase): """ Broadcast a scalar """ def __init__(self, node) -> None: super().__init__(node) self.stride_dtype = "int" @property def argument_type(self): stride_mnl = self.get_stride_mnl() name = self.name tuple_type = tuple_factory(stride_mnl, self.stride_dtype) element_type = self.element if self.tensor.is_constant: value = self.tensor.value class _Argument(ctypes.Structure): _fields_ = [ ("scalars", dtype2ctype[element_type]), ("scalar_ptrs", ctypes.c_void_p), ("dScalar", tuple_type) ] def __init__(self, kwargs) -> None: self.scalars = to_ctype_value(value, element_type) self.scalar_ptrs = 0 self.dScalar = tuple_type(stride_mnl) else: class _Argument(ctypes.Structure): _fields_ = [ ("scalars", dtype2ctype[element_type]), ("scalar_ptrs", ctypes.c_void_p), ("dScalar", tuple_type) ] def __init__(self, kwargs) -> None: scalar_or_ptr = kwargs[name] if isinstance(scalar_or_ptr, float): self.scalars = to_ctype_value(scalar_or_ptr, element_type) self.scalar_ptrs = 0 else: self.scalar_ptrs = int(scalar_or_ptr) self.dScalar = tuple_type(stride_mnl) return _Argument @staticmethod def match(node, problem_size: tuple): if node.name in LoadImplBase.reserved_names: return False strideMN = node.tensor.stride[-2:] if strideMN == (0, 0): return True else: return False class LoadNode(NodeBase): """ Load Node """ cnt = 0 possible_impls = [ AccumulatorImpl, LoadSrcImpl, AuxLoadImpl, RowBroadcastImpl, ColumnBroadcastImpl, ScalarBroadcastImpl ] def __init__(self, name: str) -> None: 
if name is None: name = f"load{LoadNode.cnt}" LoadNode.cnt += 1 super().__init__(name) self.op = "load" def type_propagation(self, *args, **kwargs): """ Load node loads tensor under type `tensor.element` and returns an array of type `tensor.element`. """ if self.tensor is None: raise RuntimeError(f"The tensor of node {self.name} is unknown.") self.element = self.tensor.element self.element_output = self.tensor.element
python/cutlass/backend/evt/ir/load_nodes.py/0
{ "file_path": "python/cutlass/backend/evt/ir/load_nodes.py", "repo_id": "python", "token_count": 4401 }
45
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Classes containing valid operations for a given compute capability and data types. 
""" from itertools import combinations_with_replacement import logging from cuda import __version__ import cutlass_library from cutlass_library.library import ConvKind, IteratorAlgorithm, StrideSupport, GroupMode import cutlass from cutlass.utils.check import valid_stage_count from cutlass.utils.datatypes import td_from_profiler_td, td_from_profiler_op _generator_ccs = [50, 60, 61, 70, 75, 80, 90] # Strip any additional information from the CUDA version _cuda_version = __version__.split("rc")[0] class KernelsForDataType: """ Container class for keeping track of kernels that correspond to a particular combination of data types for operands A, B, and accumulator """ def __init__(self, datatype_comb: tuple, layout_comb: tuple): self.datatype_comb = datatype_comb self.layout_comb = layout_comb self.math_operations = set() # Dictionary mapping from alignment (int) to a list of kernels that fit the alignment # constraint for the data type combination self.kernels_by_alignment = {} def add(self, operation): """ Add an operation to the list of supported kernels """ alignment_key = f"{operation.A.alignment} {operation.B.alignment} {operation.C.alignment}" if alignment_key not in self.kernels_by_alignment: self.kernels_by_alignment[alignment_key] = [] self.kernels_by_alignment[alignment_key].append(operation) self.math_operations.add(operation.tile_description.math_instruction.math_operation) def alignments(self, operand: str): """ Returns an unsorted list of alignments supported by this data type combination :param operand: identifier of operand in question (e.g., A, B, C) :type operand: str :return: unsorted list of alignments supported by this data type combination :rtype: list """ operand_idx = self._operand_idx(operand) return [int(key.split(" ")[operand_idx]) for key in self.kernels_by_alignment.keys()] @property def all_operations(self): """ Returns a list of all operations supported by this data type combination :return: list of all operations supported by this data type combination :rtype: list """ ops = [] for _, alignment_ops in self.kernels_by_alignment.items(): ops.extend(alignment_ops) return ops def default_operation(self, math_operation: cutlass.MathOperation): key = sorted(list(self.kernels_by_alignment.keys()))[0] kernels = self.kernels_by_alignment[key] if math_operation is not None: kernels = [x for x in kernels if x.tile_description.math_instruction.math_operation == math_operation] return kernels[0] def operations(self, alignment_A: int, alignment_B: int, alignment_C: int, math_operation: cutlass.MathOperation): """ Returns operations satisfying the alignment constraints :param alignment_A: alignment constraint of operations to return :type alignment_A: int :param alignment_B: alignment constraint of operations to return :type alignment_B: int :param alignment_C: alignment constraint of operations to return :type alignment_C: int :param math_operation: math operation to consider :type math_operation: cutlass.MathOperation :return: list of operations :rtype: list """ key = f"{alignment_A} {alignment_B} {alignment_C}" if key not in self.kernels_by_alignment: og_key = key # Reconcile A, B, and C alignments by trying to align to the minimum min_alignment = min(alignment_A, alignment_B, alignment_C) key = f"{min_alignment} {min_alignment} {min_alignment}" if key not in self.kernels_by_alignment: # Finally, go through all available alignment combinations and find # one for which all values are less than those passed in. 
key = None alignments = sorted([tuple(int(x) for x in k.split(" ")) for k in self.kernels_by_alignment.keys()], reverse=True) for align_A, align_B, align_C in alignments: if alignment_A % align_A == 0 and alignment_B % align_B == 0 and alignment_C % align_C == 0: key = f"{align_A} {align_B} {align_C}" break if key is None: raise Exception( f"No operations of alignment {og_key} found for data type and layout " f"combination {self.datatype_comb} {self.layout_comb}. Compatible alignments " f"are {self.kernels_by_alignment.keys()}" ) ops = self.kernels_by_alignment[key] if math_operation is not None: ops = [op for op in ops if op.tile_description.math_instruction.math_operation == math_operation] return ops def _operand_idx(self, key: str) -> int: operand_list = ["A", "B", "C"] if key not in operand_list: raise Exception(f"Unexpected operand {key}") return operand_list.index(key) def find_alignment(self, shape: tuple, layout: cutlass.LayoutType, operand: str) -> int: """ Returns the most preferable alignment for a given shape and layout :param shape: extent of each dimension of the tensor :type shape: tuple :param layout: layout of the tensor :type layout: cutlass.LayoutType :param operand: descriptor of the operand in question :type operand: str :return: maximum alignment supported by the data type combination and tensor size :rtype: int """ operand_idx = self._operand_idx(operand) # Determine the leading dimension of the shape if layout == cutlass.LayoutType.ColumnMajor: ld = shape[-2] elif layout == cutlass.LayoutType.RowMajor: ld = shape[-1] elif layout == cutlass.LayoutType.TensorNHWC: ld = shape[-1] else: raise Exception(f"Unexpected or unsupported layout {layout}") for alignments in sorted(list(self.kernels_by_alignment.keys()), reverse=True): alignment = int(alignments.split(" ")[operand_idx]) if ld % alignment == 0: return alignment # Default to alignment of 1 if no others match return 1 def sort(self): """ Sorts each list of kernels in `kernels_by_alignment` in descending order of threadblock shape """ key = lambda op: ( op.tile_description.threadblock_shape[0] * op.tile_description.threadblock_shape[1] * op.tile_description.threadblock_shape[2] ) for alignment in self.kernels_by_alignment.keys(): self.kernels_by_alignment[alignment].sort(key=key, reverse=True) def supports_math_operation(self, math_operation: cutlass.MathOperation) -> bool: """ Returns whether `math_operation` is supported by at least one operation. 
:param math_operation: math operation to consider :type math_operation: cutlass.MathOperation :return: whether math_operation is supported by at least one operation :rtype: bool """ return math_operation is None or math_operation in self.math_operations class ArchOptions: """ Structure for keeping track of kernels available on a given compute capability :param target_cc: compute capability of the device on which kernels will be run :type target_cc: int :param kernel_cc: compute capability of the kernels to generate :type kernel_cc: int :param operation_kind: type of operation to register :type operation_kind: cutlass_library.OperationKind :param gemm_kinds: types of GEMM operations that can be included :type gemm_kinds: list :param allowed_math_operations: types of primitive math operations allowed :type allowed_math_operations: list """ def __init__( self, target_cc: int, kernel_cc: int, operation_kind: cutlass_library.OperationKind, gemm_kinds: list, allowed_math_operations: list = [ cutlass_library.MathOperation.multiply_add, cutlass_library.MathOperation.multiply_add_saturate, cutlass_library.MathOperation.multiply_add_mixed_input_upcast, cutlass_library.MathOperation.multiply_add_fast_f32 ] ): self.cc = kernel_cc # Dictionary with following structure: # Key: OpcodeClass # Value: Dictionary with the following structure: # Key: tuple of ((DataType, DataType, DataType), (LayoutType, LayoutType, LayoutType), # representing ((element_a, element_b, element_accumulator), (layout_a, layout_b)) # Value: KernelsForDataType self.operations_by_opclass = {} self.op_class = None self.allowed_math_operations = allowed_math_operations # Identify the method within CUTLASS generator script that generates kernel # descriptions for the target CC generate_function_name = "GenerateSM" + str(kernel_cc) if not hasattr(cutlass_library.generator, generate_function_name): cutlass.logger.warning(f"No generator found for architecture {kernel_cc}") return generate_function = getattr(cutlass_library.generator, generate_function_name) # Initialize a default manifest and populate it with valid kernel descriptions # for the target CC args = [ "--kernels=all", f"--log-level={logging.getLevelName(cutlass.logger.level)}" ] manifest_args = cutlass_library.generator.define_parser().parse_args(args) manifest = cutlass_library.manifest.Manifest(manifest_args) generate_function(manifest, _cuda_version) if operation_kind not in manifest.operations: # No kernels generated for this architecture, this could be because the CUDA # toolkit is insufficient to support operations in this CC cutlass.logger.warning(f"No operations of type {operation_kind} found for CC {kernel_cc}") return # Only one CC should be returned, given the setup above of calling only the generation scripts # for a given CC if len(manifest.operations[operation_kind].keys()) != 1 or kernel_cc not in manifest.operations[operation_kind]: raise Exception(f"Error finding kernels for SM{kernel_cc}. 
Check that your CUDA toolkit version " "is sufficient for the architecture in question.") # Iterate through the available operations for this operation kind and # find available opclasses and data types for name, op_list in manifest.operations[operation_kind][kernel_cc].items(): for op in op_list: if operation_kind == cutlass_library.OperationKind.Gemm: if op.gemm_kind not in gemm_kinds: continue mi = op.tile_description.math_instruction if mi.math_operation not in self.allowed_math_operations: continue # Prune operations that don't fit in shared memory td = td_from_profiler_op(op) if not valid_stage_count(target_cc, kernel_cc, td, verbose=False)[0]: continue if mi.opcode_class not in self.operations_by_opclass: self.operations_by_opclass[mi.opcode_class] = {} datatype_comb = (mi.element_a, mi.element_b, mi.element_accumulator) layout_comb = (op.A.layout, op.B.layout) # Register TF32 kernels as F32 to enable F32 -> TF32 conversion + TF32 Tensor Core operations if datatype_comb == (cutlass_library.DataType.tf32, cutlass_library.DataType.tf32, cutlass_library.DataType.f32): # TF32 kernels only supported on SM80 and beyond if self.cc < 80: continue elif self.cc == 90: if (op.A.element != cutlass_library.DataType.f32 or op.B.element != cutlass_library.DataType.f32 or op.C.element != cutlass_library.DataType.f32): continue datatype_comb = (cutlass_library.DataType.f32, cutlass_library.DataType.f32, cutlass_library.DataType.f32) opclass_dict = self.operations_by_opclass[mi.opcode_class] key = (datatype_comb, layout_comb) if key not in opclass_dict: opclass_dict[key] = KernelsForDataType(datatype_comb, layout_comb) opclass_dict[key].add(op) # Set the default opclass to TensorOp, if available. Otherwise default to SIMT if cutlass_library.OpcodeClass.TensorOp in self.operations_by_opclass: self.op_class = cutlass_library.OpcodeClass.TensorOp else: self.op_class = cutlass_library.OpcodeClass.Simt # The profiler's generator may generate only a limited set of combinations of operands for SIMT kernels. # Here, we generate additional versions via a generic TileDescription. 
if cutlass_library.OpcodeClass.Simt not in self.operations_by_opclass: self.operations_by_opclass[cutlass_library.OpcodeClass.Simt] = {} if operation_kind == cutlass_library.OperationKind.Gemm: types = [ (cutlass_library.DataType.s8, cutlass_library.DataType.s8, cutlass_library.DataType.s8), (cutlass_library.DataType.s8, cutlass_library.DataType.s8, cutlass_library.DataType.s32), (cutlass_library.DataType.f16, cutlass_library.DataType.f16, cutlass_library.DataType.f16), (cutlass_library.DataType.f16, cutlass_library.DataType.f16, cutlass_library.DataType.f32), (cutlass_library.DataType.f32, cutlass_library.DataType.f32, cutlass_library.DataType.f32), (cutlass_library.DataType.f64, cutlass_library.DataType.f64, cutlass_library.DataType.f64), ] # Add FP8 A/B/C fp8_types = [cutlass_library.DataType.e4m3, cutlass_library.DataType.e5m2] for type_comb in combinations_with_replacement(fp8_types, 3): types.append(type_comb) # Add FP8 A/B with FP32 C for type_comb in combinations_with_replacement(fp8_types, 2): types.append(type_comb + (cutlass.DataType.f32,)) layouts = [ (cutlass_library.LayoutType.RowMajor, cutlass_library.LayoutType.RowMajor), (cutlass_library.LayoutType.RowMajor, cutlass_library.LayoutType.ColumnMajor), (cutlass_library.LayoutType.ColumnMajor, cutlass_library.LayoutType.RowMajor), (cutlass_library.LayoutType.ColumnMajor, cutlass_library.LayoutType.ColumnMajor), ] elif operation_kind == cutlass_library.OperationKind.Conv2d: types = [ (cutlass_library.DataType.f16, cutlass_library.DataType.f16, cutlass_library.DataType.f16), (cutlass_library.DataType.f16, cutlass_library.DataType.f16, cutlass_library.DataType.f32), (cutlass_library.DataType.f32, cutlass_library.DataType.f32, cutlass_library.DataType.f32), (cutlass_library.DataType.f64, cutlass_library.DataType.f64, cutlass_library.DataType.f64), ] layouts = [ (cutlass_library.LayoutType.TensorNHWC, cutlass_library.LayoutType.TensorNHWC), ] else: raise NotImplementedError(f"Operation kind {operation_kind} is currently unsupported.") alignment = 1 epilogue_functor = cutlass_library.EpilogueFunctor.LinearCombination swizzling_functor = cutlass_library.SwizzlingFunctor.Identity8 for type_comb in types: for layout_comb in layouts: comb = (type_comb, layout_comb) if comb in self.operations_by_opclass[cutlass_library.OpcodeClass.Simt]: continue A = cutlass_library.TensorDescription(type_comb[0], layout_comb[0], alignment) B = cutlass_library.TensorDescription(type_comb[1], layout_comb[1], alignment) C = cutlass_library.TensorDescription(type_comb[2], cutlass_library.LayoutType.ColumnMajor, alignment) math_inst = cutlass_library.MathInstruction( [1, 1, 1], type_comb[0], type_comb[1], type_comb[2], cutlass_library.OpcodeClass.Simt, cutlass_library.MathOperation.multiply_add ) td = cutlass_library.TileDescription( [128, 128, 8], 2, [4, 2, 1], math_inst, 50, 1024) # Prune operations that don't fit in shared memory if not valid_stage_count(target_cc, kernel_cc, td_from_profiler_td(td), verbose=False)[0]: continue new_kernels = KernelsForDataType(type_comb, layout_comb) if operation_kind == cutlass_library.OperationKind.Gemm: new_operation = cutlass_library.manifest.GemmOperation( cutlass_library.GemmKind.Universal, td.minimum_compute_capability, td, A, B, C, type_comb[2], epilogue_functor, swizzling_functor) new_kernels.add(new_operation) elif operation_kind == cutlass_library.OperationKind.Conv2d: for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]: new_operation = cutlass_library.manifest.Conv2dOperation( conv_kind, 
IteratorAlgorithm.Analytic, td.minimum_compute_capability, td, A, B, C, type_comb[2], StrideSupport.Strided, epilogue_functor, swizzling_functor, group_mode=GroupMode.SingleGroup ) new_kernels.add(new_operation) self.operations_by_opclass[cutlass_library.OpcodeClass.Simt][comb] = new_kernels # Sort all operations for oc in self.operations_by_opclass.keys(): for comb in self.operations_by_opclass[oc].keys(): self.operations_by_opclass[oc][comb].sort() def opclass_supports_combination( self, op_class: cutlass_library.OpcodeClass, datatype_comb: tuple, layout_comb: tuple, math_operation: cutlass_library.MathOperation ) -> bool: """ Returns whether the provided operation class supports the provided data type and layout combination :param op_class: operation class to consider :type op_class: cutlass_library.OpcodeClass :param datatype_comb: tuple of data types for (element_A, element_B, element_accumulator) :type datatype_comb: tuple[cutlass_library.DataType] :param layout_comb: tuple of data types for (layout_A, layout_B) :type layout_comb: tuple[cutlass_library.LayoutType] :param math_operation: math operation to consider or None if any can be considered :type math_operation: cutlass.MathOperation :return: set of operation classes that support the provided data type and layout combination :rtype: set """ if op_class not in self.operations_by_opclass: raise Exception(f"Unexpected or unsupported operation class {op_class}") if operations := self.operations_by_opclass[op_class].get((datatype_comb, layout_comb)): if math_operation is not None: return operations.supports_math_operation(math_operation) else: return True return False def supporting_opclasses( self, element_a: cutlass_library.DataType, element_b: cutlass_library.DataType, element_accumulator: cutlass_library.DataType, layout_a: cutlass_library.LayoutType, layout_b: cutlass_library.LayoutType, math_operation: cutlass_library.MathOperation, ) -> set: """ Returns a set of operation classes that support the provided data type combination :param element_a: data type of operand A :type element_a: cutlass_library.DataType :param element_b: data type of operand B :type element_b: cutlass_library.DataType :param element_accumulator: data type of accumulator :type element_accumulator: cutlass_library.DataType :param layout_a: layout of operand A :type layout_a: cutlass_library.LayoutType :param layout_b: layout of operand B :type layout_b: cutlass_library.LayoutType :param math_operation: math operation to consider :type math_operation: cutlass.MathOperation :return: set of operation classes that support the provided data type combination :rtype: set """ supporting_op_classes = set() datatype_comb = (element_a, element_b, element_accumulator) layout_comb = (layout_a, layout_b) for op_class in self.operations_by_opclass.keys(): if self.opclass_supports_combination(op_class, datatype_comb, layout_comb, math_operation): supporting_op_classes.add(op_class) return supporting_op_classes def operations( self, op_class: cutlass_library.OpcodeClass, element_a: cutlass_library.DataType, element_b: cutlass_library.DataType, element_accumulator: cutlass_library.DataType, layout_a: cutlass_library.LayoutType, layout_b: cutlass_library.LayoutType, math_operation: cutlass_library.MathOperation, ) -> KernelsForDataType: """ Returns whether the provided operation class supports the provided data type combination :param op_class: operation class to consider :type op_class: cutlass_library.OpcodeClass :param element_a: data type of operand A :type element_a: 
cutlass_library.DataType :param element_b: data type of operand B :type element_b: cutlass_library.DataType :param element_accumulator: data type of accumulator :type element_accumulator: cutlass_library.DataType :param layout_a: layout of operand A :type layout_a: cutlass_library.LayoutType :param layout_b: layout of operand B :type layout_b: cutlass_library.LayoutType :param math_operation: math operation to consider :type math_operation: cutlass.MathOperation :return: container of kernels by alignment supported by the provided combination of parameters :rtype: KernelsForDataType """ datatype_comb = (element_a, element_b, element_accumulator) layout_comb = (layout_a, layout_b) if not self.opclass_supports_combination(op_class, datatype_comb, layout_comb, math_operation): raise Exception( f"Data type layout combination {datatype_comb}, {layout_comb} " f"is not supported by opcode class {op_class} on CC {self.cc}." ) return self.operations_by_opclass[op_class][(datatype_comb, layout_comb)] class OptionRegistry: """ Container of all architecture-specific options :param target_cc: compute capability of the device on which operations will be run :type target_cc: int """ def __init__(self, target_cc: int): self.registry = {} gemm_kinds = [cutlass_library.GemmKind.Universal, cutlass_library.GemmKind.Universal3x] operation_kinds = [cutlass_library.OperationKind.Gemm, cutlass_library.OperationKind.Conv2d] # Construct options for each CC for kernel_cc in _generator_ccs: self.registry[kernel_cc] = {} for opkind in operation_kinds: self.registry[kernel_cc][opkind] = ArchOptions(target_cc, kernel_cc, opkind, gemm_kinds) def options_for_cc(self, cc: int, op_kind=cutlass_library.OperationKind.Gemm) -> ArchOptions: return self.registry.get(cc, None)[op_kind]
python/cutlass/library_defaults.py/0
{ "file_path": "python/cutlass/library_defaults.py", "repo_id": "python", "token_count": 10993 }
46
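`KernelsForDataType.operations` above resolves a requested "alignA alignB alignC" key in three steps: exact match, then the minimum of the three alignments, then the largest registered combination whose alignments all divide the request. Below is a minimal standalone sketch of that resolution order; the `resolve_alignment_key` helper and the example dictionary standing in for `kernels_by_alignment` are illustrative only, not part of the CUTLASS Python API.

# Minimal sketch of the alignment-key resolution performed by KernelsForDataType.operations.
# `available` stands in for kernels_by_alignment; keys are "alignA alignB alignC" strings.
def resolve_alignment_key(available, align_a, align_b, align_c):
    key = f"{align_a} {align_b} {align_c}"
    if key in available:
        return key                                  # 1) exact match
    m = min(align_a, align_b, align_c)
    key = f"{m} {m} {m}"
    if key in available:
        return key                                  # 2) reconcile to the minimum alignment
    # 3) largest registered combination whose alignments all divide the requested ones
    combos = sorted((tuple(int(x) for x in k.split(" ")) for k in available), reverse=True)
    for a, b, c in combos:
        if align_a % a == 0 and align_b % b == 0 and align_c % c == 0:
            return f"{a} {b} {c}"
    raise KeyError(f"No compatible alignment for {align_a} {align_b} {align_c}")

# Example: a request for 8/8/4 has no exact entry, so it falls back to the "4 4 4" kernels,
# and a request for 2/2/2 falls all the way through to the alignment-1 kernels.
kernels = {"8 8 8": ["op_align8"], "4 4 4": ["op_align4"], "1 1 1": ["op_align1"]}
assert resolve_alignment_key(kernels, 8, 8, 8) == "8 8 8"
assert resolve_alignment_key(kernels, 8, 8, 4) == "4 4 4"
assert resolve_alignment_key(kernels, 2, 2, 2) == "1 1 1"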
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Utilities for emitting GEMM kernels """ import collections import enum import functools import logging import operator import os.path import shutil try: import builtins if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True: raise ImportError("Disabling attempt to import cutlass_library") from cutlass_library.library import * except ImportError: from library import * _LOGGER = logging.getLogger(__name__) ################################################################################################### # # Data structure modeling a GEMM operation # ################################################################################################### # class GemmOperation: # def __init__(self, gemm_kind, arch, tile_description, A, B, C, element_epilogue, \ epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, D = None, kernel_schedule = KernelScheduleType.ScheduleAuto, epilogue_schedule = EpilogueScheduleType.ScheduleAuto, tile_scheduler = TileSchedulerType.Default ): kinds_3x = { GemmKind.Universal3x, GemmKind.SparseUniversal3x, } self.is_3x = gemm_kind in kinds_3x self.prefix = "3x" if self.is_3x else "" self.operation_kind = OperationKind.Gemm self.arch = arch self.tile_description = tile_description self.gemm_kind = gemm_kind self.A = A self.B = B self.C = C self.D = D if self.D == None: self.D = self.C if not self.is_3x: assert(kernel_schedule == KernelScheduleType.ScheduleAuto) assert(epilogue_schedule == EpilogueScheduleType.ScheduleAuto) self.kernel_schedule = kernel_schedule self.epilogue_schedule = epilogue_schedule self.element_epilogue = element_epilogue self.epilogue_functor = epilogue_functor if self.is_3x and epilogue_functor == 
EpilogueFunctor.LinearCombination: self.epilogue_functor = EpilogueFunctor3x.LinearCombination self.swizzling_functor = swizzling_functor self.tile_scheduler = tile_scheduler # def is_complex(self): complex_operators = [ MathOperation.multiply_add_complex, MathOperation.multiply_add_complex_gaussian, MathOperation.multiply_add_complex_fast_f32 ] return self.tile_description.math_instruction.math_operation in complex_operators # def is_mixed_input(self): return self.A.element != self.B.element # def is_planar_complex(self): return self.gemm_kind in (GemmKind.PlanarComplex, GemmKind.PlanarComplexArray) # def accumulator_type(self): accum = self.tile_description.math_instruction.element_accumulator if self.is_complex(): return get_complex_from_real(accum) return accum # def short_math_name(self): if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian: return "g%s" % ShortDataTypeNames[self.accumulator_type()] return ShortDataTypeNames[self.accumulator_type()] # def core_name(self): ''' The basic operation kind is prefixed with a letter indicating the accumulation type. ''' inst_shape = '' inst_operation = '' intermediate_type = '' math_operations_map = { MathOperation.xor_popc: 'xor', MathOperation.and_popc: 'and', MathOperation.multiply_add_fast_accum: 'fastaccum', } tensor_ops = [ OpcodeClass.TensorOp, OpcodeClass.WmmaTensorOp, OpcodeClass.SparseTensorOp, ] is_tensor_op = self.tile_description.math_instruction.opcode_class in tensor_ops if is_tensor_op: math_op = self.tile_description.math_instruction.math_operation math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else '' if self.is_3x: inst_shape = "{0}x{1}x{2}".format(*tuple(self.tile_description.math_instruction.instruction_shape)) else: inst_shape = "{0}{1}{2}".format(*tuple(self.tile_description.math_instruction.instruction_shape)) inst_shape += math_op_string if self.tile_description.math_instruction.element_a != self.A.element and \ self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator: intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, GemmKindNames[self.gemm_kind]) # Generates a string representing the MMA instruction. def extended_name(self): ''' Append data types if they differ from compute type. ''' if self.is_complex(): extended_name = "${core_name}" else: if self.C.element != self.tile_description.math_instruction.element_accumulator and \ self.A.element != self.tile_description.math_instruction.element_accumulator: extended_name = "${element_c}_${core_name}_${element_a}" if self.is_mixed_input(): extended_name += "_${element_b}" elif self.C.element == self.tile_description.math_instruction.element_accumulator and \ self.A.element != self.tile_description.math_instruction.element_accumulator: extended_name = "${core_name}_${element_a}" if self.is_mixed_input(): extended_name += "_${element_b}" else: extended_name = "${core_name}" extended_name = SubstituteTemplate(extended_name, { 'element_a': DataTypeNames[self.A.element], 'element_b': DataTypeNames[self.B.element], 'element_c': DataTypeNames[self.C.element], 'core_name': self.core_name() }) return extended_name def extended_name_3x(self): '''Generates a string representing the MMA atom. 
Assumes accumulator type is C type.''' extended_name = "{core_name}_{element_a}_{element_b}_{element_acc}_{element_c}_{element_d}".format( element_a = DataTypeNames[self.A.element], element_b = DataTypeNames[self.B.element], element_acc = DataTypeNames[self.accumulator_type()], element_c = DataTypeNames[self.C.element], element_d = DataTypeNames[self.D.element], core_name = self.core_name()) return extended_name def datatype_name_3x(self): '''Generates a string representing the MMA atom. Assumes accumulator type is C type.''' datatype_name = "{element_a}_{element_b}_{element_acc}_{element_c}_{element_d}".format( element_a = DataTypeNames[self.A.element], element_b = DataTypeNames[self.B.element], element_acc = DataTypeNames[self.accumulator_type()], element_c = DataTypeNames[self.C.element], element_d = DataTypeNames[self.D.element]) return datatype_name # Generates a short string representing the AB layout tags (e.g. nt or tn) def layout_name(self): if self.is_complex() or self.is_planar_complex(): return "%s%s" % ( ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)], ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)] ) return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout]) # Generates a short string representing the ABC layout tags (e.g. ntn or tnn) def layout_name_3x(self): if self.is_complex() or self.is_planar_complex(): return "{}{}{}".format( ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)], ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)], ShortComplexLayoutNames[(self.C.layout, self.C.complex_transform)]) else: return "{}{}{}".format( ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout], ShortLayoutTypeNames[self.C.layout]) # Generates a short string representing underlying kernel schedule type def kernel_schedule_name_3x(self): return KernelScheduleSuffixes[self.kernel_schedule] # Generates a short string representing underlying epilogue schedule type def epilogue_schedule_name_3x(self): return EpilogueScheduleSuffixes[self.epilogue_schedule] # Generate a short string representing the operation class def opcode_class_name(self): return OpcodeClassNames[self.tile_description.math_instruction.opcode_class] # Generates the full kernel function name def procedural_name(self): ''' The full procedural name indicates architecture, extended name, tile size, and layout. 
''' opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class] if self.arch >= 90: kernel_name_template = "cutlass{p}_sm{ar}_{op}_{ex}{ct}{cs}_{l}_{s}_align{al}{t}{k}{e}" return kernel_name_template.format( p = self.prefix, ar = self.arch, op = opcode_class_name, ex = self.extended_name_3x(), ct = '_' + 'x'.join([str(i) for i in self.tile_description.tile_shape]) if self.tile_description.tile_shape[0] > 0 else "", cs = '_' + 'x'.join([str(i) for i in self.tile_description.cluster_shape]), l = self.tile_description.stages, s = self.layout_name_3x(), al = str(max(self.A.alignment, self.B.alignment)), t = TileSchedulerSuffixes[self.tile_scheduler], k = self.kernel_schedule_name_3x(), e = self.epilogue_schedule_name_3x()) else: threadblock = self.tile_description.procedural_name() return "cutlass{p}_{op}_{ex}_{tb}_{l}_align{a}".format( p = self.prefix, op = opcode_class_name, ex = self.extended_name(), tb = threadblock, l = self.layout_name(), a = str(max(self.A.alignment, self.B.alignment))) # def configuration_name(self): ''' The full procedural name indicates architecture, extended name, tile size, and layout. ''' return self.procedural_name() def __hash__(self): return hash(self.configuration_name()) def __eq__(self, other): return self.configuration_name() == other.configuration_name() ################################################################################################### # # Data structure modeling a grouped GEMM operation # ################################################################################################### # class GroupedGemmOperation(GemmOperation): # def __init__(self, gemm_kind, arch, tile_description, A, B, C, element_epilogue, \ epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \ scheduler_mode = GroupScheduleMode.Device): super().__init__(gemm_kind, arch, tile_description, A, B, C, element_epilogue, \ epilogue_functor, swizzling_functor) self.scheduler_mode = scheduler_mode # def procedural_name(self): ''' The full procedural name indicates architecture, extended name, tile size, and layout. 
''' base = super().procedural_name() return SubstituteTemplate( base + "_schedule${schedule}", { 'schedule': ShortGroupScheduleModeNames[self.scheduler_mode] }) ################################################################################################### # # Emits single instances of a CUTLASS device-wide operator # ################################################################################################### # class EmitGemmInstance: ''' Responsible for emitting a CUTLASS template definition''' def __init__(self, operation_suffix = ''): self.operation_suffix = operation_suffix self.includes = [] self.gemm_template = """ // Gemm operator ${operation_name} using Operation_${operation_name} = cutlass::gemm::device::Gemm< ${element_a}, ${layout_a}, ${element_b}, ${layout_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue} >, ${swizzling_functor}, ${stages}, ${align_a}, ${align_b}, false, ${math_operation} ${residual} >; """ self.gemm_complex_template = """ // Gemm operator ${operation_name} using Operation_${operation_name} = cutlass::gemm::device::GemmComplex< ${element_a}, ${layout_a}, ${element_b}, ${layout_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue} >, ${swizzling_functor}, ${stages}, ${transform_a}, ${transform_b}, ${math_operation} ${residual} >; """ # def instance_template(self): return """ ${compile_guard_start} manifest.append(new ${gemm_kind}<Operation_${operation_name}>("${operation_name}")); ${compile_guard_end} """ # def emit(self, operation): warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)] epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) residual = '' values = { 'operation_name': operation.procedural_name(), 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[operation.A.layout], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[operation.B.layout], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': 
str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_vector_length': str(epilogue_vector_length), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], 'stages': str(operation.tile_description.stages), 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), 'transform_a': ComplexTransformTag[operation.A.complex_transform], 'transform_b': ComplexTransformTag[operation.B.complex_transform], 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], 'residual': residual } template = self.gemm_complex_template if operation.is_complex() else self.gemm_template return SubstituteTemplate(template, values) ################################################################################################### class EmitSparseGemmInstance: ''' Responsible for emitting a CUTLASS template definition''' def __init__(self, operation_suffix = ''): self.operation_suffix = operation_suffix self.includes = [] self.gemm_template = """ // Gemm operator ${operation_name} using Operation_${operation_name} = cutlass::gemm::device::SparseGemm< ${element_a}, ${layout_a}, ${element_b}, ${layout_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue} >, ${swizzling_functor}, ${stages}, ${align_a}, ${align_b}, false, ${math_operation} ${residual} >; """ # def instance_template(self): return """ ${compile_guard_start} manifest.append(new ${gemm_kind}<Operation_${operation_name}>("${operation_name}")); ${compile_guard_end} """ # def emit(self, operation): warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)] epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element]) residual = '' values = { 'operation_name': operation.procedural_name(), 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[operation.A.layout], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[operation.B.layout], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': 
str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_vector_length': str(epilogue_vector_length), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], 'stages': str(operation.tile_description.stages), 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), 'transform_a': ComplexTransformTag[operation.A.complex_transform], 'transform_b': ComplexTransformTag[operation.B.complex_transform], 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], 'residual': residual } template = self.gemm_template return SubstituteTemplate(template, values) ################################################################################################### # class EmitGemmUniversalInstance: ''' Responsible for emitting a CUTLASS template definition''' def __init__(self, operation_suffix = ''): self.operation_suffix = operation_suffix self.includes = [ "cutlass/cutlass.h", "cutlass/numeric_types.h", "cutlass/arch/arch.h", "cutlass/arch/mma.h", "cutlass/layout/matrix.h", "cutlass/gemm/device/gemm.h", "cutlass/gemm/device/gemm_universal_adapter.h", "cutlass/gemm/kernel/default_gemm_universal.h", ] self.builtin_epilogue_functor_template = """ ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue} > """ self.gemm_template = """ // Gemm operator ${operation_name} using ${operation_name}_base = typename cutlass::gemm::kernel::DefaultGemmUniversal< ${element_b}, ${layout_b}, ${transform_b}, ${align_b}, // transposed B operand ${element_a}, ${layout_a}, ${transform_a}, ${align_a}, // transposed A operand ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}, ${swizzling_functor}, ${stages}, ${math_operation} >::GemmKernel; // Define named type struct ${operation_name}${operation_suffix} : public ${operation_name}_base { }; """ self.gemm_template_interleaved = """ // Gemm operator ${operation_name} using ${operation_name}_base = typename cutlass::gemm::kernel::DefaultGemmUniversal< ${element_a}, ${layout_a}, ${transform_a}, ${align_a}, ${element_b}, ${layout_b}, ${transform_b}, ${align_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}, ${swizzling_functor}, ${stages}, ${math_operation} >::GemmKernel; // Define named type struct ${operation_name}${operation_suffix} : public ${operation_name}_base { }; """ # def instance_template(self): return """ ${compile_guard_start} manifest.append(new ${gemm_kind}< cutlass::gemm::device::GemmUniversalAdapter<${operation_name}> >("${operation_name}")); ${compile_guard_end} """ # def emit(self, operation): threadblock_shape = operation.tile_description.threadblock_shape 
warp_count = operation.tile_description.warp_count warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)] transpose_layouts = { LayoutType.ColumnMajor: LayoutType.RowMajor, LayoutType.RowMajor: LayoutType.ColumnMajor } if operation.A.layout in transpose_layouts.keys() and \ operation.B.layout in transpose_layouts.keys() and \ operation.C.layout in transpose_layouts.keys(): instance_layout_A = transpose_layouts[operation.A.layout] instance_layout_B = transpose_layouts[operation.B.layout] instance_layout_C = transpose_layouts[operation.C.layout] gemm_template = self.gemm_template else: instance_layout_A, instance_layout_B, instance_layout_C = \ (operation.A.layout, operation.B.layout, operation.C.layout) gemm_template = self.gemm_template_interleaved # # Support built-in epilogue functors or user-defined functions if isinstance(operation.epilogue_functor, enum.Enum): epilogue_vector_length = \ min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element] values = { 'epilogue_vector_length': str(epilogue_vector_length), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], } epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values) else: epilogue_functor = self.epilogue_functor.emit_declaration() # values = { 'operation_name': operation.procedural_name(), 'operation_suffix': self.operation_suffix, 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[instance_layout_A], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[instance_layout_B], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[instance_layout_C], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_functor': epilogue_functor, 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], 'stages': str(operation.tile_description.stages), 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), 'transform_a': ComplexTransformTag[operation.A.complex_transform], 'transform_b': ComplexTransformTag[operation.B.complex_transform], 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation] } return SubstituteTemplate(gemm_template, values) ################################################################################################### class EmitGemmUniversal3xInstance: ''' Responsible for emitting a CUTLASS 3.x template definition''' def __init__(self, operation_suffix = ''): self.operation_suffix = operation_suffix self.includes = [ "cutlass/cutlass.h", "cutlass/gemm/gemm.h", "cutlass/numeric_types.h", "cutlass/gemm/kernel/gemm_universal.hpp", 
"cutlass/gemm/collective/collective_builder.hpp", "cutlass/epilogue/collective/collective_builder.hpp", ] self.builtin_epilogue_functor_template = """ ${epilogue_functor}< ${element_d}, ${element_epilogue}, ${element_c}, ${element_epilogue} > """ self.gemm_template = """ using ${operation_name}_epilogue = typename cutlass::epilogue::collective::CollectiveBuilder< ${arch}, ${opcode_class_epi}, cute::Shape<cute::_${tile_shape_epi_m}, cute::_${tile_shape_epi_n}, cute::_${tile_shape_epi_k}>, cute::Shape<${cluster_shape_m}, ${cluster_shape_n}, ${cluster_shape_k}>, ${epi_tile_mn}, ${element_accumulator}, ${element_epilogue}, ${element_c}, ${layout_c}, ${align_c}, ${element_d}, ${layout_d}, ${align_d}, ${epilogue_schedule}, ${epilogue_functor} >::CollectiveOp; using ${operation_name}_mainloop = typename cutlass::gemm::collective::CollectiveBuilder< ${arch}, ${opcode_class_main}, ${element_a}, ${layout_a}, ${align_a}, ${element_b}, ${layout_b}, ${align_b}, ${element_accumulator}, cute::Shape<cute::_${tile_shape_main_m}, cute::_${tile_shape_main_n}, cute::_${tile_shape_main_k}>, cute::Shape<${cluster_shape_m}, ${cluster_shape_n}, ${cluster_shape_k}>, ${stages}, ${kernel_schedule} >::CollectiveOp; // Gemm operator ${operation_name} using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal< cute::Shape<int,int,int,int>, ${operation_name}_mainloop, ${operation_name}_epilogue, ${tile_scheduler}>; // Define named type struct ${operation_name} : public ${operation_name}_base { }; """ # def instance_template(self): return """ ${compile_guard_start} { using GemmKernel = cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>; manifest.append( new ${gemm_kind}<GemmKernel>("${operation_name}")); } ${compile_guard_end} """ # def emit(self, operation): _LOGGER.debug("*** EmitGemmConfigurationLibrary::emit(operation)") _LOGGER.debug("*** operation.procedural_name(): " + operation.procedural_name()) _LOGGER.debug("*** tile_shape: " + str(operation.tile_description.tile_shape)) _LOGGER.debug("*** warp_count: " + str(operation.tile_description.warp_count)) opcode_class_main = operation.tile_description.math_instruction.opcode_class opcode_class_epi = opcode_class_main tile_shape = operation.tile_description.tile_shape instruction_shape = operation.tile_description.math_instruction.instruction_shape cluster_m = operation.tile_description.cluster_shape[0] cluster_n = operation.tile_description.cluster_shape[1] tile_shape_main_m, tile_shape_main_n, tile_shape_main_k = tile_shape tile_shape_epi_m, tile_shape_epi_n, tile_shape_epi_k = tile_shape # account for static/dynamic cluster shapes cta_m = tile_shape[0] // cluster_m if cluster_m > 0 else tile_shape[0] cta_n = tile_shape[1] // cluster_n if cluster_n > 0 else tile_shape[1] # stage count set to zero indicates builder automatic stage selection if operation.tile_description.stages > 0: stage_count_string = f"cutlass::gemm::collective::StageCount<{str(operation.tile_description.stages)}>" else: stage_count_string = f"cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename {str(operation.procedural_name())}_epilogue::SharedStorage))>" epi_tile_mn = "cutlass::epilogue::collective::EpilogueTileAuto" instance_layout_A, instance_layout_B, instance_layout_C , instance_layout_D = \ (operation.A.layout, operation.B.layout, operation.C.layout, operation.D.layout) # 3.0 profiler integration only supports trivial epilogues for now epilogue_vector_length = 1 # Support built-in epilogue functors or user-defined functions if 
isinstance(operation.epilogue_functor, enum.Enum): values = { 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'epilogue_functor': EpilogueFunctor3xTag[operation.epilogue_functor], } epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values) else: epilogue_functor = self.epilogue_functor.emit_declaration() # # Cutlass3x complex kernels' ElementA(B) is a tuple in collective mainloop builder, e.g. cute::tuple<Element, Transform>, Transform : cute::identity / cute::conjugate. element_a = DataTypeTag[operation.A.element] if not operation.is_complex() else f"cute::tuple<{str(DataTypeTag[operation.A.element])},{str(ComplexTransformTag3x[operation.A.complex_transform])}>" element_b = DataTypeTag[operation.B.element] if not operation.is_complex() else f"cute::tuple<{str(DataTypeTag[operation.B.element])},{str(ComplexTransformTag3x[operation.B.complex_transform])}>" epilogue_schedule_type = EpilogueScheduleTag[operation.epilogue_schedule] values = { 'operation_name': operation.procedural_name(), 'operation_suffix': self.operation_suffix, 'element_a': element_a, 'layout_a': LayoutTag[instance_layout_A], 'element_b': element_b, 'layout_b': LayoutTag[instance_layout_B], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[instance_layout_C], 'element_d': DataTypeTag[operation.D.element], 'layout_d': LayoutTag[instance_layout_D], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class_main': OpcodeClassTag[opcode_class_main], 'opcode_class_epi': OpcodeClassTag[opcode_class_epi], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'tile_shape_epi_m': str(tile_shape_epi_m), 'tile_shape_epi_n': str(tile_shape_epi_n), 'tile_shape_epi_k': str(tile_shape_epi_k), 'tile_shape_main_m': str(tile_shape_main_m), 'tile_shape_main_n': str(tile_shape_main_n), 'tile_shape_main_k': str(tile_shape_main_k), 'cluster_shape_m': 'cute::_' + str(operation.tile_description.cluster_shape[0]) if operation.tile_description.cluster_shape[0] > 0 else "int", 'cluster_shape_n': 'cute::_' + str(operation.tile_description.cluster_shape[1]) if operation.tile_description.cluster_shape[1] > 0 else "int", 'cluster_shape_k': 'cute::_' + str(operation.tile_description.cluster_shape[2]) if operation.tile_description.cluster_shape[2] > 0 else "int", 'instruction_shape_m': str(instruction_shape[0]), 'instruction_shape_n': str(instruction_shape[1]), 'instruction_shape_k': str(instruction_shape[2]), 'kernel_schedule' : str(KernelScheduleTag[operation.kernel_schedule]), 'epilogue_schedule' : str(epilogue_schedule_type), 'epi_tile_mn' : epi_tile_mn, 'epilogue_functor': epilogue_functor, 'stages': stage_count_string, 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), 'align_c': str(operation.C.alignment), 'align_d': str(operation.C.alignment), 'transform_a': ComplexTransformTag[operation.A.complex_transform], 'transform_b': ComplexTransformTag[operation.B.complex_transform], 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], 'epilogue_vector_length': str(epilogue_vector_length), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'tile_scheduler': str(TileSchedulerTag[operation.tile_scheduler]), } return SubstituteTemplate(self.gemm_template, values) ################################################################################################### # class EmitGemmPlanarComplexInstance: ''' Responsible for emitting a CUTLASS template definition''' def __init__(self, operation_suffix = 
''): self.operation_suffix = operation_suffix self.includes = [] self.template = """ // Gemm operator ${operation_name} using Operation_${operation_name} = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< ${element_a}, ${layout_a}, ${transform_a}, ${alignment_a}, ${element_b}, ${layout_b}, ${transform_b}, ${alignment_b}, ${element_c}, cutlass::layout::RowMajor, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, cutlass::epilogue::thread::LinearCombinationPlanarComplex< ${element_c}, ${alignment_c}, ${element_accumulator}, ${element_epilogue} >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, ${stages}, ${math_operator} >::GemmKernel; struct ${operation_name} : public Operation_${operation_name} { }; """ # def instance_template(self): return """ ${compile_guard_start} manifest.append(new ${gemm_kind}< cutlass::gemm::device::GemmUniversalAdapter<${operation_name}> >("${operation_name}")); ${compile_guard_end} """ # def emit(self, operation): warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)] # exchange and transpose A and B types, layouts, and complex transforms since the C layout is row-major transposed_layout_A = TransposedLayout[operation.A.layout] transposed_layout_B = TransposedLayout[operation.B.layout] values = { 'operation_name': operation.procedural_name(), 'element_a': DataTypeTag[operation.B.element], 'layout_a': LayoutTag[transposed_layout_B], 'transform_a': ComplexTransformTag[operation.B.complex_transform], 'alignment_a': str(operation.B.alignment), 'element_b': DataTypeTag[operation.A.element], 'layout_b': LayoutTag[transposed_layout_A], 'transform_b': ComplexTransformTag[operation.A.complex_transform], 'alignment_b': str(operation.A.alignment), 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'alignment_c': str(operation.C.alignment), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'stages': str(operation.tile_description.stages), 'math_operator': 'cutlass::arch::OpMultiplyAdd' } return SubstituteTemplate(self.template, values) ################################################################################################### # class EmitGemmPlanarComplexArrayInstance: ''' Responsible for emitting a CUTLASS template definition''' def __init__(self, operation_suffix = ''): self.operation_suffix = 
operation_suffix self.includes = [] self.template = """ // Gemm operator ${operation_name} using Operation_${operation_name} = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal< ${element_a}, ${layout_a}, ${transform_a}, ${alignment_a}, ${element_b}, ${layout_b}, ${transform_b}, ${alignment_b}, ${element_c}, cutlass::layout::RowMajor, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, cutlass::epilogue::thread::LinearCombinationPlanarComplex< ${element_c}, ${alignment_c}, ${element_accumulator}, ${element_epilogue} >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, ${stages}, ${math_operator} >::GemmArrayKernel; struct ${operation_name} : public Operation_${operation_name} { }; """ # def instance_template(self): return """ ${compile_guard_start} manifest.append(new ${gemm_kind}< cutlass::gemm::device::GemmUniversalAdapter<${operation_name}> >("${operation_name}")); ${compile_guard_end} """ # def emit(self, operation): warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)] # exchange and transpose A and B types, layouts, and complex transforms since the C layout is row-major transposed_layout_A = TransposedLayout[operation.A.layout] transposed_layout_B = TransposedLayout[operation.B.layout] values = { 'operation_name': operation.procedural_name(), 'element_a': DataTypeTag[operation.B.element], 'layout_a': LayoutTag[transposed_layout_B], 'transform_a': ComplexTransformTag[operation.B.complex_transform], 'alignment_a': str(operation.B.alignment), 'element_b': DataTypeTag[operation.A.element], 'layout_b': LayoutTag[transposed_layout_A], 'transform_b': ComplexTransformTag[operation.A.complex_transform], 'alignment_b': str(operation.A.alignment), 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'alignment_c': str(operation.C.alignment), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'stages': str(operation.tile_description.stages), 'math_operator': 'cutlass::arch::OpMultiplyAdd' } return SubstituteTemplate(self.template, values) ################################################################################################### # class EmitGemmGroupedInstance: ''' Responsible for emitting a CUTLASS template definition''' def __init__(self, operation_suffix = ''): self.operation_suffix = operation_suffix self.includes = [ 
"cutlass/cutlass.h", "cutlass/numeric_types.h", "cutlass/arch/arch.h", "cutlass/arch/mma.h", "cutlass/layout/matrix.h", "cutlass/gemm/device/gemm.h", "cutlass/gemm/kernel/gemm_grouped.h", "cutlass/gemm/kernel/default_gemm_grouped.h", "cutlass/gemm/device/gemm_grouped.h" ] self.builtin_epilogue_functor_template = """ ${epilogue_functor}< ${element_c}, ${epilogue_vector_length}, ${element_accumulator}, ${element_epilogue} > """ self.gemm_template = """ // Gemm operator ${operation_name} using ${operation_name}_base = typename cutlass::gemm::kernel::DefaultGemmGrouped< ${element_a}, ${layout_a}, ${transform_a}, ${align_a}, ${element_b}, ${layout_b}, ${transform_b}, ${align_b}, ${element_c}, ${layout_c}, ${element_accumulator}, ${opcode_class}, ${arch}, cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>, cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>, cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>, ${epilogue_functor}, ${swizzling_functor}, ${stages}, ${scheduler_mode}, ${math_operation} >::GemmKernel; // Define named type struct ${operation_name}${operation_suffix} : public ${operation_name}_base { }; """ # def instance_template(self): return """ ${compile_guard_start} manifest.append(new ${gemm_kind}< cutlass::gemm::device::GemmGrouped<${operation_name}> >("${operation_name}")); ${compile_guard_end} """ # def emit(self, operation): threadblock_shape = operation.tile_description.threadblock_shape warp_count = operation.tile_description.warp_count warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)] transpose_layouts = { LayoutType.ColumnMajor: LayoutType.RowMajor, LayoutType.RowMajor: LayoutType.ColumnMajor } instance_layout_A, instance_layout_B, instance_layout_C = \ (operation.A.layout, operation.B.layout, operation.C.layout) # # Support built-in epilogue functors or user-defined functions if isinstance(operation.epilogue_functor, enum.Enum): epilogue_vector_length = \ min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element] values = { 'epilogue_vector_length': str(epilogue_vector_length), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], } epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values) else: epilogue_functor = self.epilogue_functor.emit_declaration() # values = { 'operation_name': operation.procedural_name(), 'operation_suffix': self.operation_suffix, 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[instance_layout_A], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[instance_layout_B], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[instance_layout_C], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': "cutlass::arch::Sm%d" % operation.arch, 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': 
str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_functor': epilogue_functor, 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], 'stages': str(operation.tile_description.stages), 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), 'transform_a': ComplexTransformTag[operation.A.complex_transform], 'transform_b': ComplexTransformTag[operation.B.complex_transform], 'scheduler_mode': GroupScheduleModeTag[operation.scheduler_mode], 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation] } return SubstituteTemplate(self.gemm_template, values) ################################################################################################### # # Emitters functions for all targets # ################################################################################################### class EmitGemmConfigurationLibrary: def __init__(self, operation_path, configuration_name): self.configuration_name = configuration_name self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/') self.instance_emitter = { GemmKind.Gemm: EmitGemmInstance, GemmKind.Sparse: EmitSparseGemmInstance, GemmKind.Universal: EmitGemmUniversalInstance, GemmKind.Universal3x: EmitGemmUniversal3xInstance, GemmKind.PlanarComplex: EmitGemmPlanarComplexInstance, GemmKind.PlanarComplexArray: EmitGemmPlanarComplexArrayInstance, GemmKind.Grouped: EmitGemmGroupedInstance } self.gemm_kind_wrappers = { GemmKind.Gemm: 'GemmOperation', GemmKind.Sparse: 'GemmSparseOperation', GemmKind.Universal: 'GemmUniversalOperation', GemmKind.Universal3x: 'GemmUniversal3xOperation', GemmKind.PlanarComplex: 'GemmPlanarComplexOperation', GemmKind.PlanarComplexArray: 'GemmPlanarComplexArrayOperation', GemmKind.Grouped: 'GemmGroupedOperation' } self.wmma_guard_start = "#if defined(CUTLASS_ARCH_WMMA_SM${sm_number}_ENABLED)" self.separator = """ /////////////////////////////////////////////////////////////////////////////////////////////////// """ self.header_template = """ /* Generated by gemm_operation.py - Do not edit. 
*/ """ self.initialize_function_template = """ /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { /////////////////////////////////////////////////////////////////////////////////////////////////// void initialize_${configuration_name}(Manifest &manifest) { """ self.epilogue_template = """ } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// """ def __enter__(self): _LOGGER.debug("*** EmitGemmConfigurationLibrary::__enter__") _LOGGER.debug("*** configuration_path (file to write): " + str(self.configuration_path)) self.configuration_file = open(self.configuration_path, "w") self.configuration_file.write(self.header_template) self.configuration_file.write(self.separator) self.includes = collections.OrderedDict([ ("cutlass/cutlass.h", None), ("cutlass/library/library.h", None), ("cutlass/library/manifest.h", None), ("library_internal.h", None), ("gemm_operation.h", None), ("gemm_operation_3x.hpp", None), ("cutlass/arch/wmma.h", None), ("cutlass/numeric_types.h", None) ]) self.instance_definitions = [] self.instance_wrappers = [] self.operations = [] return self def emit(self, operation): _LOGGER.debug("*** EmitGemmConfigurationLibrary::emit(operation)") _LOGGER.debug("*** operation.gemm_kind: " + str(operation.gemm_kind)) emitter = self.instance_emitter[operation.gemm_kind]() for incl in emitter.includes: self.includes[incl] = None self.operations.append(operation) self.instance_definitions.append(emitter.emit(operation)) self.instance_wrappers.append(SubstituteTemplate(emitter.instance_template(), { 'configuration_name': self.configuration_name, 'operation_name': operation.procedural_name(), 'gemm_kind': self.gemm_kind_wrappers[operation.gemm_kind], 'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \ if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "", 'compile_guard_end': "#endif" \ if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "" })) def __exit__(self, exception_type, exception_value, traceback): # Write includes for incl, _ in self.includes.items(): include_statement = "#include \"%s\"\n" % incl self.configuration_file.write(include_statement) self.configuration_file.write(self.separator) # Write instance definitions in top-level namespace for instance_definition in self.instance_definitions: self.configuration_file.write(instance_definition) # Add wrapper objects within initialize() function self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, { 'configuration_name': self.configuration_name })) for instance_wrapper in self.instance_wrappers: self.configuration_file.write(instance_wrapper) self.configuration_file.write(self.epilogue_template) self.configuration_file.close() ################################################################################################### ###################################################################################################
python/cutlass_library/gemm_operation.py/0
{ "file_path": "python/cutlass_library/gemm_operation.py", "repo_id": "python", "token_count": 19947 }
47
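Each emitter class above follows the same pattern: build a `values` dictionary from the operation description, then substitute it into a `${...}` template string. Below is a minimal, self-contained sketch of that mechanism; the substitution helper, the operation name, and the abbreviated C++ body are illustrative stand-ins (this is not the library's `SubstituteTemplate`, and the emitted text is not a complete kernel declaration).

import re

def substitute(template: str, values: dict) -> str:
    # Replace each ${key} with its value; unknown keys are left untouched.
    return re.sub(r"\$\{(\w+)\}", lambda m: values.get(m.group(1), m.group(0)), template)

# Abbreviated template in the style of the emitters above (not a full declaration).
template = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
    cutlass::gemm::kernel::DefaultGemmGrouped<
      ${element_a}, ${layout_a},
      cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>
    >::GemmKernel;
"""

values = {
    "operation_name": "example_grouped_gemm_f16",   # hypothetical procedural name
    "element_a": "cutlass::half_t",
    "layout_a": "cutlass::layout::ColumnMajor",
    "threadblock_shape_m": "128",
    "threadblock_shape_n": "128",
    "threadblock_shape_k": "32",
}

print(substitute(template, values))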
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-check" width="44" height="44" viewBox="0 0 24 24" stroke-width="2" stroke="#22863a" fill="none" stroke-linecap="round" stroke-linejoin="round"> <path stroke="none" d="M0 0h24v24H0z" fill="none"/> <path d="M5 12l5 5l10 -10" /> </svg>
python/docs/_static/check-solid.svg/0
{ "file_path": "python/docs/_static/check-solid.svg", "repo_id": "python", "token_count": 130 }
48
/* remove conflicting styling from Sphinx themes */ div.nbinput.container div.prompt *, div.nboutput.container div.prompt *, div.nbinput.container div.input_area pre, div.nboutput.container div.output_area pre, div.nbinput.container div.input_area .highlight, div.nboutput.container div.output_area .highlight { border: none; padding: 0; margin: 0; box-shadow: none; } div.nbinput.container > div[class*=highlight], div.nboutput.container > div[class*=highlight] { margin: 0; } div.nbinput.container div.prompt *, div.nboutput.container div.prompt * { background: none; } div.nboutput.container div.output_area .highlight, div.nboutput.container div.output_area pre { background: unset; } div.nboutput.container div.output_area div.highlight { color: unset; /* override Pygments text color */ } /* avoid gaps between output lines */ div.nboutput.container div[class*=highlight] pre { line-height: normal; } /* input/output containers */ div.nbinput.container, div.nboutput.container { display: -webkit-flex; display: flex; align-items: flex-start; margin: 0; width: 100%; } @media (max-width: 540px) { div.nbinput.container, div.nboutput.container { flex-direction: column; } } /* input container */ div.nbinput.container { padding-top: 5px; } /* last container */ div.nblast.container { padding-bottom: 5px; } /* input prompt */ div.nbinput.container div.prompt pre { color: #307FC1; } /* output prompt */ div.nboutput.container div.prompt pre { color: #BF5B3D; } /* all prompts */ div.nbinput.container div.prompt, div.nboutput.container div.prompt { width: 4.5ex; padding-top: 5px; position: relative; user-select: none; } div.nbinput.container div.prompt > div, div.nboutput.container div.prompt > div { position: absolute; right: 0; margin-right: 0.3ex; } @media (max-width: 540px) { div.nbinput.container div.prompt, div.nboutput.container div.prompt { width: unset; text-align: left; padding: 0.4em; } div.nboutput.container div.prompt.empty { padding: 0; } div.nbinput.container div.prompt > div, div.nboutput.container div.prompt > div { position: unset; } } /* disable scrollbars and line breaks on prompts */ div.nbinput.container div.prompt pre, div.nboutput.container div.prompt pre { overflow: hidden; white-space: pre; } /* input/output area */ div.nbinput.container div.input_area, div.nboutput.container div.output_area { -webkit-flex: 1; flex: 1; overflow: auto; } @media (max-width: 540px) { div.nbinput.container div.input_area, div.nboutput.container div.output_area { width: 100%; } } /* input area */ div.nbinput.container div.input_area { border: 1px solid #e0e0e0; border-radius: 2px; /*background: #f5f5f5;*/ } /* override MathJax center alignment in output cells */ div.nboutput.container div[class*=MathJax] { text-align: left !important; } /* override sphinx.ext.imgmath center alignment in output cells */ div.nboutput.container div.math p { text-align: left; } /* standard error */ div.nboutput.container div.output_area.stderr { background: #fdd; } /* ANSI colors */ .ansi-black-fg { color: #3E424D; } .ansi-black-bg { background-color: #3E424D; } .ansi-black-intense-fg { color: #282C36; } .ansi-black-intense-bg { background-color: #282C36; } .ansi-red-fg { color: #E75C58; } .ansi-red-bg { background-color: #E75C58; } .ansi-red-intense-fg { color: #B22B31; } .ansi-red-intense-bg { background-color: #B22B31; } .ansi-green-fg { color: #00A250; } .ansi-green-bg { background-color: #00A250; } .ansi-green-intense-fg { color: #007427; } .ansi-green-intense-bg { background-color: #007427; } .ansi-yellow-fg { color: 
#DDB62B; } .ansi-yellow-bg { background-color: #DDB62B; } .ansi-yellow-intense-fg { color: #B27D12; } .ansi-yellow-intense-bg { background-color: #B27D12; } .ansi-blue-fg { color: #208FFB; } .ansi-blue-bg { background-color: #208FFB; } .ansi-blue-intense-fg { color: #0065CA; } .ansi-blue-intense-bg { background-color: #0065CA; } .ansi-magenta-fg { color: #D160C4; } .ansi-magenta-bg { background-color: #D160C4; } .ansi-magenta-intense-fg { color: #A03196; } .ansi-magenta-intense-bg { background-color: #A03196; } .ansi-cyan-fg { color: #60C6C8; } .ansi-cyan-bg { background-color: #60C6C8; } .ansi-cyan-intense-fg { color: #258F8F; } .ansi-cyan-intense-bg { background-color: #258F8F; } .ansi-white-fg { color: #C5C1B4; } .ansi-white-bg { background-color: #C5C1B4; } .ansi-white-intense-fg { color: #A1A6B2; } .ansi-white-intense-bg { background-color: #A1A6B2; } .ansi-default-inverse-fg { color: #FFFFFF; } .ansi-default-inverse-bg { background-color: #000000; } .ansi-bold { font-weight: bold; } .ansi-underline { text-decoration: underline; } div.nbinput.container div.input_area div[class*=highlight] > pre, div.nboutput.container div.output_area div[class*=highlight] > pre, div.nboutput.container div.output_area div[class*=highlight].math, div.nboutput.container div.output_area.rendered_html, div.nboutput.container div.output_area > div.output_javascript, div.nboutput.container div.output_area:not(.rendered_html) > img{ padding: 5px; margin: 0; } /* fix copybtn overflow problem in chromium (needed for 'sphinx_copybutton') */ div.nbinput.container div.input_area > div[class^='highlight'], div.nboutput.container div.output_area > div[class^='highlight']{ overflow-y: hidden; } /* hide copybtn icon on prompts (needed for 'sphinx_copybutton') */ .prompt .copybtn { display: none; } /* Some additional styling taken form the Jupyter notebook CSS */ .jp-RenderedHTMLCommon table, div.rendered_html table { border: none; border-collapse: collapse; border-spacing: 0; color: black; font-size: 12px; table-layout: fixed; } .jp-RenderedHTMLCommon thead, div.rendered_html thead { border-bottom: 1px solid black; vertical-align: bottom; } .jp-RenderedHTMLCommon tr, .jp-RenderedHTMLCommon th, .jp-RenderedHTMLCommon td, div.rendered_html tr, div.rendered_html th, div.rendered_html td { text-align: right; vertical-align: middle; padding: 0.5em 0.5em; line-height: normal; white-space: normal; max-width: none; border: none; } .jp-RenderedHTMLCommon th, div.rendered_html th { font-weight: bold; } .jp-RenderedHTMLCommon tbody tr:nth-child(odd), div.rendered_html tbody tr:nth-child(odd) { background: #f5f5f5; } .jp-RenderedHTMLCommon tbody tr:hover, div.rendered_html tbody tr:hover { background: rgba(66, 165, 245, 0.2); }
python/docs/_static/nbsphinx-code-cells.css/0
{ "file_path": "python/docs/_static/nbsphinx-code-cells.css", "repo_id": "python", "token_count": 2670 }
49
/* body[data-theme] { */ :root { --tabs--label-text: #4b5563; --tabs--label-text--hover: #4b5563; --tabs--label-text--active: #0ea5e9; --tabs--label-text--active--hover: #0ea5e9; --tabs--label-background: transparent; --tabs--label-background--hover: transparent; --tabs--label-background--active: transparent; --tabs--label-background--active--hover: transparent; --tabs--label-border: transparent; --tabs--label-border--hover: #d1d5db; --tabs--label-border--active: #0ea5e9; --tabs--label-border--active--hover: #0ea5e9; --tabs--padding-x: 1.25em; --tabs--margin-x: 0; --tabs--border: #e6e6e6; } /* Hide radio buttons */ .tab-set > input { position: absolute; opacity: 0; } /* Tab set container */ .tab-set { border-radius: 2px; display: flex; flex-wrap: wrap; margin: 0.75em 0; position: relative; } /* Tab label */ .tab-set > label { z-index: 1; width: auto; border-bottom: 2px solid var(--tabs--label-border); padding: 1em var(--tabs--padding-x) 0.5em; margin-left: var(--tabs--margin-x); color: var(--tabs--label-text); background: var(--tabs--label-background); transition: color 250ms; cursor: pointer; font-size: 0.875em; font-weight: 700; } .tab-set > label:nth-child(2) { margin-left: 0; } /* Hovered label */ .tab-set > label:hover { color: var(--tabs--label-text--hover); background: var(--tabs--label-background--hover); border-color: var(--tabs--label-border--hover); } /* Active tab label */ .tab-set > input:checked + label { color: var(--tabs--label-text--active); background: var(--tabs--label-background--active); border-color: var(--tabs--label-border--active); } .tab-set > input:checked + label:hover { color: var(--tabs--label-text--active--hover); background: var(--tabs--label-background--active--hover); border-color: var(--tabs--label-border--active--hover); } /* Tab content */ .tab-content { order: 99; display: none; width: 100%; box-shadow: 0 -0.0625rem var(--tabs--border); } /* Show content, when input is checked. */ .tab-set > input:checked + label + .tab-content { display: block; } .tab-content > p:first-child { margin-top: 0.75rem; } /* Remove the top border on first code block */ .tab-content > [class^="highlight-"]:first-child .highlight { border-top: none; border-top-left-radius: 0; border-top-right-radius: 0; } /* Remove margins on children */ .tab-content > *:first-child { margin-top: 0; } .tab-content > *:last-child { margin-bottom: 0; } /* Remove margins on nested tabs */ .tab-content > .tab-set { margin: 0; }
python/docs/_static/tabs.css/0
{ "file_path": "python/docs/_static/tabs.css", "repo_id": "python", "token_count": 989 }
50
CUTLASS ======= Subpackages ----------- .. toctree:: :maxdepth: 1 cutlass.emit cutlass.op cutlass.utils Epilogue -------- .. automodule:: cutlass.epilogue :members: :undoc-members: :show-inheritance: Library Defaults ---------------- .. automodule:: cutlass.library_defaults :members: :undoc-members: :show-inheritance: Swizzle ---------- .. automodule:: cutlass.swizzle :members: :undoc-members: :show-inheritance:
python/docs_src/source/cutlass.rst/0
{ "file_path": "python/docs_src/source/cutlass.rst", "repo_id": "python", "token_count": 185 }
51
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Low-level functionality tests for GEMM with F16 operands on SM80 """ from functools import partial import logging import unittest import cutlass from cutlass.backend.utils.device import device_cc from utils import LayoutCombination, add_test_gemm cutlass.set_log_level(logging.WARNING) cc = 80 dtype = cutlass.DataType.f16 @unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.') @unittest.skipIf(cutlass.utils.datatypes.torch_type(dtype) is None, f'Version of torch installed does not contain a datatype match for {dtype}') class GemmF16Sm80(unittest.TestCase): """ Wrapper class to which tests will be added dynamically in __main__ """ pass @unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.') @unittest.skipIf(cutlass.utils.datatypes.torch_type(dtype) is None, f'Version of torch installed does not contain a datatype match for {dtype}') class GemmF16Sm80StreamK(unittest.TestCase): """ Wrapper class to which tests will be added dynamically in __main__ """ pass add_test_specialized = partial(add_test_gemm, element=dtype, cc=cc, cluster_shape=[1, 1, 1]) # Tests using TensorOp add_test_tensorop = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.NNN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.NNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], 
warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.NTN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.NTT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TTN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TTT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 128, 32], warp_count=[1, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 64, 32], warp_count=[2, 1, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 64], warp_count=[1, 1, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[4, 4, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[4, 4, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 64], warp_count=[1, 1, 1], stages=5) add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[2, 
2, 2], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) # Tests using SIMT add_test_simt = partial(add_test_specialized, opclass=cutlass.OpcodeClass.Simt) add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.NNN, alignments=[1, 1, 1], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 8], warp_count=[2, 2, 1], stages=2) add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.TNN, alignments=[1, 1, 1], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 128, 8], warp_count=[1, 2, 1], stages=2) add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.NTN, alignments=[1, 1, 1], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 64, 8], warp_count=[2, 1, 1], stages=2) add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.TTN, alignments=[1, 1, 1], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 8], warp_count=[1, 1, 1], stages=2) add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.NNT, alignments=[1, 1, 1], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 8], warp_count=[2, 2, 1], stages=2) # Stream K tests add_test_streamk = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp, swizzle=cutlass.swizzle.ThreadblockSwizzleStreamK) add_test_streamk(cls=GemmF16Sm80StreamK, layouts=LayoutCombination.NNN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3) add_test_streamk(cls=GemmF16Sm80StreamK, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16, element_C=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 64], warp_count=[1, 1, 1], stages=5) if __name__ == '__main__': unittest.main()
test/python/cutlass/gemm/gemm_f16_sm80.py/0
{ "file_path": "test/python/cutlass/gemm/gemm_f16_sm80.py", "repo_id": "test", "token_count": 3797 }
52
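The test module above builds its case matrix by currying a single registration function with `functools.partial`, so each line only states the parameters that vary. The standalone sketch below shows that layering; the function is a placeholder that merely prints its arguments, not the real `add_test_gemm`, which generates and attaches `unittest` methods to the wrapper classes.

from functools import partial

def add_test(cls, element, opclass, layouts, alignments, threadblock_shape, stages):
    # Placeholder: record the configuration instead of generating a test case.
    print(cls.__name__, element, opclass, layouts, alignments, threadblock_shape, stages)

class DummySuite:
    # Stands in for a wrapper class such as GemmF16Sm80.
    pass

# Fix the arguments shared by every case once ...
add_test_specialized = partial(add_test, element="f16")
add_test_tensorop = partial(add_test_specialized, opclass="TensorOp")

# ... then each registration states only what differs between cases.
add_test_tensorop(cls=DummySuite, layouts="TNT", alignments=[8, 8, 8],
                  threadblock_shape=[128, 128, 32], stages=3)
add_test_tensorop(cls=DummySuite, layouts="NNN", alignments=[8, 8, 8],
                  threadblock_shape=[64, 64, 64], stages=5)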
#################################################################################################
#
# Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################

"""
Helper functions & classes for interface tests
"""

class ExpectException:
    """
    Utility class to assert that an exception was raised when expected

    Example:

    .. highlight:: python
    .. code-block:: python

        with ExpectException(True, 'Division by zero'):
            x = 1.0 / 0.0

    :param exception_expected: whether an exception is expected to be raised
    :type exception_expected: bool
    :param message: message to print if an exception is raised when not expected or vice versa
    :type message: str
    """
    def __init__(self, exception_expected: bool, message: str = '', verify_msg=False):
        self.exception_expected = exception_expected
        self.message = message
        self.verify_msg = verify_msg

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, traceback):
        exception_raised = exc_type is not None
        assert self.exception_expected == exception_raised, self.message

        if self.verify_msg:
            exc_message = f"{exc_type.__name__}: {exc_val}"
            assert exc_message == self.message, f"expect error message {self.message}, got {exc_message}"

        # Suppress the exception
        return True
test/python/cutlass/interface/utils.py/0
{ "file_path": "test/python/cutlass/interface/utils.py", "repo_id": "test", "token_count": 897 }
53
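A short usage sketch for the `ExpectException` context manager defined above; the expressions and expected-message strings are illustrative.

# An exception is expected here, and its message is checked exactly.
with ExpectException(True, 'ZeroDivisionError: division by zero', verify_msg=True):
    x = 1 / 0

# No exception is expected; the assertion in __exit__ fires if one is raised.
with ExpectException(False, 'unexpected exception while adding'):
    y = 1 + 2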
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass_unit_test.h" #include <iostream> #include <iomanip> #include <utility> #include <type_traits> #include <vector> #include <numeric> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> using namespace cute; __global__ void test(double const* g_in, double* g_out) { extern __shared__ double smem[]; smem[threadIdx.x] = g_in[threadIdx.x]; __syncthreads(); g_out[threadIdx.x] = 2 * smem[threadIdx.x]; } __global__ void test2(double const* g_in, double* g_out) { using namespace cute; extern __shared__ double smem[]; auto s_tensor = make_tensor(make_smem_ptr(smem + threadIdx.x), Int<1>{}); auto g_tensor = make_tensor(make_gmem_ptr(g_in + threadIdx.x), Int<1>{}); copy(g_tensor, s_tensor); cp_async_fence(); cp_async_wait<0>(); __syncthreads(); g_out[threadIdx.x] = 2 * smem[threadIdx.x]; } TEST(SM80_CuTe_Ampere, CpAsync) { constexpr int count = 32; thrust::host_vector<double> h_in(count); for (int i = 0; i < count; ++i) { h_in[i] = double(i); } thrust::device_vector<double> d_in(h_in); thrust::device_vector<double> d_out(count, -1); test<<<1, count, sizeof(double) * count>>>( thrust::raw_pointer_cast(d_in.data()), thrust::raw_pointer_cast(d_out.data())); thrust::host_vector<double> h_result = d_out; thrust::device_vector<double> d_out_cp_async(count, -2); test2<<<1, count, sizeof(double) * count>>>( thrust::raw_pointer_cast(d_in.data()), thrust::raw_pointer_cast(d_out_cp_async.data())); thrust::host_vector<double> h_result_cp_async = d_out_cp_async; for (int i = 0; i < count; ++i) { EXPECT_EQ(h_result[i], h_result_cp_async[i]); } }
test/unit/cute/ampere/cp_async.cu/0
{ "file_path": "test/unit/cute/ampere/cp_async.cu", "repo_id": "test", "token_count": 1191 }
54
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass_unit_test.h" #include <cutlass/trace.h> #include <iostream> #include <cute/tensor.hpp> using namespace cute; template <class Layout> void test_left_inverse(Layout const& layout) { auto inv_layout = left_inverse(layout); CUTLASS_TRACE_HOST(layout << " ^ -1\n" << " => \n" << inv_layout); for (int i = 0; i < size(layout); ++i) { //printf("%3d: %3d %3d\n", i, int(layout(i)), int(inv_layout(layout(i)))); EXPECT_EQ(inv_layout(layout(i)), i); } CUTLASS_TRACE_HOST("Composition: " << coalesce(composition(inv_layout, layout))); } TEST(CuTe_core, Inverse_left) { { auto layout = Layout<Shape <_1>, Stride<_0>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <Shape <_1,_1>>, Stride<Stride<_0,_0>>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_1>, Stride<_1>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_4>, Stride<_1>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_4>, Stride<_2>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_8, _4>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_8, _4>, Stride<_4, _1>>{}; test_left_inverse(filter(layout)); } { auto layout = Layout<Shape< _2,_4,_6>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_2,_4,_6>, Stride<_4,_1,_8>>{}; test_left_inverse(layout); } { auto layout = Layout<Shape <_4, _2>, Stride<_1,_16>>{}; test_left_inverse(layout); } // // Swizzle left_inverse // { auto layout = ComposedLayout<Swizzle<1,0,2>, _0, Layout<Shape <_4, _4>, Stride<_1, _4>>>{}; test_left_inverse(layout); } { auto layout = ComposedLayout<Swizzle<1,0,2>, _0, Layout<Shape <_4, _4>, Stride<_4, _1>>>{}; test_left_inverse(layout); } { auto layout = ComposedLayout<Swizzle<1,0,1>, _0, Layout<Shape <_4, 
_4>, Stride<_8, _1>>>{}; test_left_inverse(layout); } // // Negative strides (beta support) // Post-conditions/layout indexing aren't generalized enough to support these yet // However, the composition post-condition is general enough. { auto layout = make_layout(Shape<_4>{}, Stride<Int<-1>>{}); test_left_inverse(layout); } //{ //auto layout = Layout<Shape < _2,_4>, // Stride<_m1,_2>>{}; //test_left_inverse(layout); //} //{ //auto layout = Layout<Shape < _2, _4>, // Stride< _4,_m1>>{}; //test_left_inverse(layout); //} //{ //auto layout = Layout<Shape < _2, _4, _6>, // Stride<_m1,_12,_m2>>{}; //test_left_inverse(layout); //} }
test/unit/cute/core/inverse_left.cpp/0
{ "file_path": "test/unit/cute/core/inverse_left.cpp", "repo_id": "test", "token_count": 1980 }
55
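To make the post-condition concrete, here is a small standalone sketch (not part of the test suite above) that checks the defining property of `left_inverse` on one hand-worked layout; the index sequences in the comments follow directly from the strides.

#include <cassert>
#include <cute/tensor.hpp>

using namespace cute;

int main() {
  // (2,4):(4,1) visits offsets 0,4,1,5,2,6,3,7 as the 1-D index runs 0..7.
  auto layout = Layout<Shape<_2,_4>, Stride<_4,_1>>{};
  // Its left inverse must send those offsets back to 0..7 in order
  // (the (4,2):(2,1) layout does exactly that: 0,2,4,6,1,3,5,7).
  auto inv = left_inverse(layout);

  for (int i = 0; i < size(layout); ++i) {
    assert(inv(layout(i)) == i);   // defining property of a left inverse
  }
  return 0;
}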
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass_unit_test.h" #include <iostream> #include <cstdint> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> namespace cutlass::test { template <class ElementType, class SmemLayout> struct SharedStorage { cute::ArrayEngine<ElementType, cute::cosize_v<SmemLayout>> smem; alignas(16) cute::uint64_t tma_load_mbar[1]; }; #if CUDA_12_0_SM90_FEATURES_SUPPORTED template <class T, class TiledCopy, class CTA_Tiler, class GmemLayout, class SmemLayout> __global__ void tma_test_device_cute(T const* g_in, T* g_out, CUTE_GRID_CONSTANT TiledCopy const tma, CTA_Tiler cta_tiler, GmemLayout gmem_layout, SmemLayout smem_layout) { using namespace cute; CUTE_STATIC_ASSERT_V(product_each(shape(cta_tiler)) == product_each(shape(smem_layout))); // Use Shared Storage structure to allocate and distribute aligned SMEM addresses extern __shared__ char shared_memory[]; using SharedStorage = SharedStorage<T, SmemLayout>; SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory); // Construct SMEM tensor Tensor sA = make_tensor(make_smem_ptr(shared_storage.smem.begin()), smem_layout); // (CTA_TILE_M,CTA_TILE_N,...) // Shared memory barriers use 64bits in SMEM for synchronization uint64_t* tma_load_mbar = shared_storage.tma_load_mbar; // TMA requires special handling of strides to deal with coord codomain mapping // Represent the full tensors -- get these from TMA Tensor mA = tma.get_tma_tensor(shape(gmem_layout)); Tensor mB = make_tensor(make_gmem_ptr<T>(g_out), gmem_layout); constexpr int R = rank_v<CTA_Tiler>; Tensor gA = flat_divide(mA, cta_tiler); // (CTA_TILE_M,CTA_TILE_N,...REST_M,REST_N,...) 
Tensor gB = flat_divide(mB, cta_tiler); // (CTA_TILE_M,CTA_TILE_N,...REST_M,REST_N,...) // // Prepare the TMA_LOAD // auto cta_tma = tma.get_slice(Int<0>{}); // CTA slice Tensor tAgA_x = cta_tma.partition_S(gA); // (TMA,TMA_M,TMA_N,REST_M,REST_N) Tensor tAsA_x = cta_tma.partition_D(sA); // (TMA,TMA_M,TMA_N) #if 0 if (thread0()) { print(tma); print("TILE : "); print(cta_tiler); print("\n"); print(" mA : "); print( mA); print("\n"); print(" mB : "); print( mB); print("\n"); print(" gA : "); print( gA); print("\n"); print(" gB : "); print( gB); print("\n"); print(" sA : "); print( sA); print("\n"); print("tAgA_x: "); print(tAgA_x); print("\n"); print("tAsA_x: "); print(tAsA_x); print("\n"); } #endif // // Perform the TMA_LOAD // // INPUT: Group the REST_X modes and the TMA_X modes to easily iterate through the tiles Tensor tAgA = group_modes<1,rank(tAgA_x)>(tAgA_x); // (TMA,REST) Tensor tAsA = group_modes<1,rank(tAsA_x)>(tAsA_x); // (TMA,REST) static_assert(size<1>(tAsA) == 1); // OUTPUT: Group the CTA_TILE_X modes and REST_X modes for output Tensor tBgB = group_modes<0,R>(group_modes<R,rank(gB)>(gB)); // (CTA_TILE, REST) #if 0 if (thread0()) { print("tAgA : "); print(tAgA); print("\n"); print("tAsA : "); print(tAsA); print("\n"); print("tBgB : "); print(tBgB); print("\n"); } #endif // Loop over the TMA stages, using smem as our buffer for (int stage = 0; stage < size<1>(tAgA); ++stage) { // Set the bytes transferred in this TMA transaction (may involve multiple issues) constexpr int kTmaTransactionBytes = sizeof(ArrayEngine<T, size(sA)>); if (threadIdx.x == 0) { /// Initialize shared memory barrier tma_load_mbar[0] = 0; cute::initialize_barrier(tma_load_mbar[0], 1 /*numThreads*/); cute::set_barrier_transaction_bytes(tma_load_mbar[0], kTmaTransactionBytes); copy(tma.with(tma_load_mbar[0]), tAgA(_,stage), tAsA(_,0)); } __syncthreads(); /// Wait on the shared memory barrier until the phase bit flips from kPhaseBit value constexpr int kPhaseBit = 0; cute::wait_barrier(tma_load_mbar[0], kPhaseBit); // // Write out trivially smem -> gmem // // Subbyte elements could cause race conditions, so be even more conservative if (thread0()) { copy(sA, tBgB(_,stage)); } __syncthreads(); } } template <class T, class TmaType = T, class CopyOp, class GMEM_Layout, class SMEM_Layout, class CTA_Tile> auto test_tma_load(CopyOp const& copy_op, GMEM_Layout const& gmem_layout, SMEM_Layout const& smem_layout, CTA_Tile const& cta_tile) { using namespace cute; // Allocate and initialize host test data size_t N = ceil_div(cosize(gmem_layout) * sizeof_bits<T>::value, 8); thrust::host_vector<uint8_t> h_in(N); for (size_t i = 0; i < h_in.size(); ++i) { h_in[i] = uint8_t(i % 13); } Tensor hA_in = make_tensor(recast_ptr<T>(h_in.data()), gmem_layout); // Allocate and initialize device test data thrust::device_vector<uint8_t> d_in = h_in; thrust::device_vector<uint8_t> d_out(h_in.size(), uint8_t(-1)); // overflow uint // Create TMA for this device Tensor Tensor gA = make_tensor(make_gmem_ptr<T>(raw_pointer_cast(d_in.data())), gmem_layout); auto tma = make_tma_copy<TmaType>(copy_op, gA, smem_layout, cta_tile, Int<1>{}); //print(tma); // Launch int smem_size = int(sizeof(SharedStorage<T, decltype(smem_layout)>)); tma_test_device_cute<<<1, 128, smem_size>>>( reinterpret_cast<T const*>(raw_pointer_cast(d_in.data())), reinterpret_cast<T*> (raw_pointer_cast(d_out.data())), tma, cta_tile, gmem_layout, smem_layout); // Copy results back to host thrust::host_vector<uint8_t> h_out = d_out; Tensor hA_out = 
make_tensor(recast_ptr<T>(h_out.data()), gmem_layout); // Validate the results. Print only the first 3 errors. int count = 3; for (int i = 0; i < int(size(hA_out)) && count > 0; ++i) { EXPECT_EQ(hA_in(i), hA_out(i)); if (hA_in(i) != hA_out(i)) { --count; } } return tma; } #endif } // end namespace cutlass::test
test/unit/cute/hopper/tma_load_testbed.hpp/0
{ "file_path": "test/unit/cute/hopper/tma_load_testbed.hpp", "repo_id": "test", "token_count": 3205 }
56
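The harness above is normally driven from a `.cu` unit test that instantiates it for a specific copy op and layout pair. A minimal, hypothetical invocation is sketched below; the element type, shapes, and row-major layouts are arbitrary choices for illustration, and the CTA tile is simply taken to be the SMEM tile shape.

#include "tma_load_testbed.hpp"

using namespace cute;

void example_tma_load() {
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
  // Global tensor: 256 x 256 floats, row-major; SMEM tile: 32 x 32, row-major.
  auto gmem_layout = make_layout(make_shape(256, 256), GenRowMajor{});
  auto smem_layout = make_layout(make_shape(Int<32>{}, Int<32>{}), GenRowMajor{});

  // Issue TMA loads tile-by-tile and compare the round-tripped data on the host.
  cutlass::test::test_tma_load<float>(SM90_TMA_LOAD{},
                                      gmem_layout,
                                      smem_layout,
                                      product_each(shape(smem_layout)));
#endif
}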
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/epilogue/thread/linear_combination_planar_complex.h" // Tensor Op #include "cutlass/gemm/warp/default_mma_tensor_op.h" // Volta Tensor Op #include "cutlass/gemm/warp/mma_tensor_op_sm70.h" #include "cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h" // Simt #include "cutlass/gemm/warp/mma_simt.h" #include "cutlass/gemm/warp/mma_simt_policy.h" // Epilogue components #include "cutlass/epilogue/threadblock/default_epilogue_planar_complex.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "testbed_planar_complex.h" ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_threadblock_epilogue, planar_complex_f32_f32_tensor_op_64x64_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor >::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationPlanarComplex< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex< Shape, WarpMmaTensorOp, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpiloguePlanarComplexTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_threadblock_epilogue, planar_complex_f16_f32_tensor_op_64x64_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor >::Type; // // Output operator // using OutputOp = 
cutlass::epilogue::thread::LinearCombinationPlanarComplex< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex< Shape, WarpMmaTensorOp, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpiloguePlanarComplexTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_threadblock_epilogue, planar_complex_f16_f16_tensor_op_64x64_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::half_t; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise< cutlass::sizeof_bits<Element>::value, 64>; using WarpMmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor >::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationPlanarComplex< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex< Shape, WarpMmaTensorOp, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpiloguePlanarComplexTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_threadblock_epilogue, planar_complex_f32_f32_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<32, 32, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using Element = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<Element>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<Element>::value>; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, Element, cutlass::layout::ColumnMajor, Element, cutlass::layout::RowMajor, ElementAccumulator, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, Element, LayoutA, Element, LayoutB, ElementAccumulator, cutlass::layout::RowMajor, Policy >; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationPlanarComplex< 
ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex< Shape, WarpMmaTensorOp, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm70, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpiloguePlanarComplexTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_threadblock_epilogue, planar_complex_simt_f32_64x64_32x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = float; using ElementAccumulator = float; using ElementCompute = float; int const kElementsPerAccess = 1; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using Element = float; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using ElementOutput = Element; using ElementAccumulator = Element; using ElementCompute = Element; using WarpMmaSimt = cutlass::gemm::warp::MmaSimt< WarpShape, Element, LayoutA, Element, LayoutB, Element, LayoutC, cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<4, 8>, cutlass::layout::RowMajorInterleaved<2>, cutlass::gemm::GemmShape<4, 4, 1> > >; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationPlanarComplex< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex< Shape, WarpMmaSimt, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpiloguePlanarComplexTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Epilogue_threadblock_epilogue, planar_complex_simt_f64_64x64_16x32x8) { // // Define the warp-level matrix multiply // using ElementOutput = double; using ElementAccumulator = double; using ElementCompute = double; int const kElementsPerAccess = 1; int const kPartitionsK = 1; using Shape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using Element = double; using ElementC = ElementAccumulator; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using ElementOutput = Element; using ElementAccumulator = Element; using ElementCompute = Element; using WarpMmaSimt = cutlass::gemm::warp::MmaSimt< WarpShape, Element, LayoutA, Element, LayoutB, Element, LayoutC, cutlass::gemm::warp::MmaSimtPolicy< cutlass::MatrixShape<4, 8>, cutlass::layout::RowMajorInterleaved<2>, cutlass::gemm::GemmShape<4, 4, 1> > >; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombinationPlanarComplex< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex< Shape, WarpMmaSimt, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, kPartitionsK, 
OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpiloguePlanarComplexTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } /////////////////////////////////////////////////////////////////////////////////////////////////
test/unit/epilogue/threadblock/epilogue_planar_complex.cu/0
{ "file_path": "test/unit/epilogue/threadblock/epilogue_planar_complex.cu", "repo_id": "test", "token_count": 4846 }
57
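//
// [Illustrative sketch -- not part of the original epilogue_planar_complex.cu]
// Every planar-complex epilogue test above repeats the same recipe: pick threadblock and
// warp shapes, build a warp-level MMA, wrap the output in LinearCombinationPlanarComplex,
// and let DefaultEpiloguePlanarComplex assemble the threadblock epilogue. Assuming the same
// headers that file already includes, the recipe can be captured once as an alias template;
// the alias name below is hypothetical.
//

template <
  typename Shape_,              // threadblock-level GemmShape
  typename WarpMma_,            // warp-level MMA (tensor op, Volta tensor op, or SIMT)
  typename OpClass_,            // cutlass::arch::OpClassTensorOp or OpClassSimt
  typename ArchTag_,            // cutlass::arch::Sm50 / Sm70 / Sm75
  int PartitionsK,
  typename ElementOutput_,
  typename ElementAccumulator_,
  typename ElementCompute_,
  int ElementsPerAccess
>
using PlanarComplexEpilogueSketch = typename cutlass::epilogue::threadblock::DefaultEpiloguePlanarComplex<
  Shape_,
  WarpMma_,
  OpClass_,
  ArchTag_,
  PartitionsK,
  cutlass::epilogue::thread::LinearCombinationPlanarComplex<
    ElementOutput_,
    ElementsPerAccess,
    ElementAccumulator_,
    ElementCompute_
  >,
  ElementsPerAccess
>::Epilogue;

// Usage (mirrors the SIMT f32 64x64 test above; SIMT uses 1 element per access):
//
//   using Epilogue = PlanarComplexEpilogueSketch<
//     cutlass::gemm::GemmShape<64, 64, 8>, WarpMmaSimt,
//     cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
//     1, float, float, float, 1>;
//
//   EpiloguePlanarComplexTestbed<Epilogue> testbed;
//   EXPECT_TRUE(testbed.run_all());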
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide GEMM interface with elementwise tensor-tensor broadcast epilogue */ #pragma once #include <iostream> #include <fstream> #include <sstream> #include "../../common/cutlass_unit_test.h" #include "testbed_utils.h" #include "gemm_testbed_3x.hpp" namespace test { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Gemm> struct Testbed3xTensorBroadcast { using TestBedImpl = typename detail::TestbedImpl<Gemm>; using Kernel = typename Gemm::GemmKernel; using Epilogue = typename Gemm::GemmKernel::CollectiveEpilogue; using ElementA = typename Kernel::ElementA; using StrideA = typename Kernel::StrideA; using ElementB = typename Kernel::ElementB; using StrideB = typename Kernel::StrideB; using ElementC = typename Kernel::ElementC; using StrideC = typename Kernel::StrideC; using ElementD = typename Kernel::ElementD; using StrideD = typename Kernel::StrideD; using ElementAccumulator = typename Kernel::ElementAccumulator; using ElementCompute = typename Epilogue::ElementCompute; using ElementScalar = typename Epilogue::ElementScalar; using ProblemShapeType = typename Kernel::ProblemShape; using ElementBias = typename Epilogue::ElementBias; using ActivationFunctor = typename Epilogue::ActivationFunctor; static constexpr bool IsBinaryOp0Enabled = Epilogue::IsBinaryOp0Enabled; static constexpr bool IsBinaryOp1Enabled = Epilogue::IsBinaryOp1Enabled; static constexpr bool IsUnaryOpEnabled = Epilogue::IsUnaryOpEnabled; static constexpr bool PerColBias = Epilogue::PerColumnBias; using LayoutTagA = typename TestBedImpl::LayoutTagA; using LayoutTagB = typename TestBedImpl::LayoutTagB; using LayoutTagC = typename TestBedImpl::LayoutTagC; using LayoutTagD = typename TestBedImpl::LayoutTagD; using LayoutTagVector = cutlass::layout::PackedVectorLayout; cutlass::HostTensor<ElementBias, LayoutTagVector> bias; cutlass::HostTensor<ElementC, LayoutTagC> tensor_C1; // tensor_C0 is taken from TestbedImpl's tensor_C // Detail Implementation TestBedImpl impl_; // // Methods // Testbed3xTensorBroadcast( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = TestBedImpl::kDefaultSeed ) : impl_(CheckEquality::EXACT, ScalarLoc::ON_DEVICE, VectorBeta::ENABLED, init_A_, init_B_, init_C_, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform, seed_) { } Testbed3xTensorBroadcast( typename LayoutTagA::Stride stride_factor_A_, typename LayoutTagB::Stride stride_factor_B_, typename LayoutTagC::Stride stride_factor_C_, typename LayoutTagD::Stride stride_factor_D_, cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = TestBedImpl::kDefaultSeed ) : impl_(stride_factor_A_, stride_factor_B_, stride_factor_C_, stride_factor_D_, CheckEquality::EXACT, ScalarLoc::ON_HOST, VectorBeta::ENABLED, init_A_, init_B_, init_C_, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform, seed_) { } /// Initializes data structures void initialize(ProblemShapeType problem_size) { // // Allocate the GEMM workspace for A/B/C/D tensor // impl_.initialize(problem_size); } void initialize_bias(ProblemShapeType problem_size) { auto problem_shape_MNKL = 
cute::append<4>(problem_size, 1); auto bias_size = PerColBias ? cute::get<1>(problem_shape_MNKL) : cute::get<0>(problem_shape_MNKL); bias.resize(cutlass::Coord<1>(bias_size)); EXPECT_TRUE(detail::initialize_tensor(bias.host_view(), cutlass::Distribution::Uniform, impl_.collective_mma_inputs.seed + 2023)); bias.sync_device(); } void initialize_c1(ProblemShapeType problem_size) { auto problem_shape_MNKL = cute::append<4>(problem_size, 1); auto M = cute::get<0>(problem_shape_MNKL); auto N = cute::get<1>(problem_shape_MNKL); auto L = cute::get<3>(problem_shape_MNKL); auto c_coord = cutlass::make_Coord(M * L, N); tensor_C1.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(c_coord, impl_.collective_epilogue.stride_factor_C)); EXPECT_TRUE(detail::initialize_tensor(tensor_C1.host_view(), cutlass::Distribution::Uniform, impl_.collective_mma_inputs.seed + 2024)); tensor_C1.sync_device(); } /// Compares computed reference with device reference and outputs to a file if incorrect bool compare_reference( cute::Shape<int,int,int,int> problem_shape_MNKL, ElementScalar alpha, ElementScalar beta, bool use_bias) { auto [M, N, K, L] = problem_shape_MNKL; impl_.collective_epilogue.tensor_D.sync_host(); EXPECT_GT(cutlass::reference::host::TensorNorm(impl_.collective_mma_inputs.tensor_A.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(impl_.collective_mma_inputs.tensor_B.host_view()), 0); if (impl_.collective_epilogue.tensor_D.size() > 1) { EXPECT_GT(cutlass::reference::host::TensorNorm(impl_.collective_epilogue.tensor_D.host_view()), 0); } if (impl_.collective_epilogue.reference_D.size() > 1) { EXPECT_GT(cutlass::reference::host::TensorNorm(impl_.collective_epilogue.reference_D.host_view()), 0); } bool passed = cutlass::reference::host::TensorEquals(impl_.collective_epilogue.reference_D.host_view(), impl_.collective_epilogue.tensor_D.host_view()); EXPECT_TRUE(passed); if (!passed) { std::stringstream fname; fname << "error_Gemm_device_broadcast" << M << "x" << N << "x" << K << "x" << L << "_" << cute::get<0>(typename Gemm::GemmKernel::TileShape{}) << "_" << cute::get<1>(typename Gemm::GemmKernel::TileShape{}) << "_" << cute::get<2>(typename Gemm::GemmKernel::TileShape{}) << ".txt"; std::ofstream file(fname.str()); file << "problem: " << ' ' << M << "x" << N << "x" << K << ", Batch count = " << L << ", alpha: " << float(alpha) << ", beta: " << float(beta) << ", use_bias: " << use_bias << ", per-col bias: " << PerColBias << "\n\n"; if (use_bias){ file << "Bias = \n" << bias.host_view()<< "\n\n"; } file << "A =\n" << impl_.collective_mma_inputs.tensor_A.host_view() << "\nB =\n" << impl_.collective_mma_inputs.tensor_B.host_view() << "\nC0 =\n" << impl_.collective_epilogue.tensor_C.host_view() << "\nC1 =\n" << tensor_C1.host_view() << "\n\nReference =\n" << impl_.collective_epilogue.reference_D.host_view() << "\n\nComputed =\n" <<impl_.collective_epilogue.tensor_D.host_view(); } return passed; } /// Verifies the result matches the GEMM with elementwise tensor-tensor /// broadcast operation bool verify( ProblemShapeType problem_size, ElementScalar alpha, ElementScalar beta, bool use_bias) { auto problem_shape_MNKL = cute::append<4>(problem_size, 1); auto M = cute::get<0>(problem_shape_MNKL); auto N = cute::get<1>(problem_shape_MNKL); auto K = cute::get<2>(problem_shape_MNKL); auto L = cute::get<3>(problem_shape_MNKL); auto A = cute::make_tensor(impl_.collective_mma_inputs.tensor_A.host_data(), cute::make_layout(cute::make_shape(M, K, L), 
impl_.collective_mma_inputs.stride_a)); auto B = cute::make_tensor(impl_.collective_mma_inputs.tensor_B.host_data(), cute::make_layout(cute::make_shape(N, K, L), impl_.collective_mma_inputs.stride_b)); auto D = cute::make_tensor(impl_.collective_epilogue.reference_D.host_data(), cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_d)); auto Bias = cute::make_tensor(static_cast<ElementBias*>(use_bias ? bias.host_data() : nullptr), cute::make_layout(PerColBias ? cute::make_shape(1, N) : cute::make_shape(M, 1))); auto C0 = cute::make_tensor(impl_.collective_epilogue.tensor_C.host_data(), cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_c)); auto C1 = cute::make_tensor(tensor_C1.host_data(), cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_c)); // Create host workspace for output of testbed. This computes a portion of the epilogue: // ref_compute_out = Activation(alpha * (A @ B) + bias) cutlass::HostTensor<ElementCompute, LayoutTagC> ref_compute_out; auto c_coord = cutlass::make_Coord(M * L, N); ref_compute_out.resize(c_coord, cutlass::layout::Affine2Layout_Factory<LayoutTagD>::layout_factory(c_coord, impl_.collective_epilogue.stride_factor_C), false); auto RefComputeOut = cute::make_tensor(ref_compute_out.host_data(), cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_c)); cutlass::reference::host::GettMainloopParams<ElementAccumulator, decltype(A), decltype(B)> mainloop_params{A, B}; // Use a dummy null tensor for operand C because the epilogue overrides C. auto dummy_C = cute::make_tensor(static_cast<ElementC*>(nullptr), cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_c)); ElementCompute dummy_beta(0); auto dummy_Aux = cute::make_tensor(static_cast<ElementD*>(nullptr), cute::make_layout(cute::make_shape(M, N, L), impl_.collective_epilogue.stride_d)); auto dummy_Valpha = cute::make_tensor(static_cast<ElementCompute*>(nullptr), cute::make_layout(cute::make_shape(M, 1))); auto dummy_Vbeta = cute::make_tensor(static_cast<ElementCompute*>(nullptr), cute::make_layout(cute::make_shape(M, 1))); cutlass::reference::host::GettEpilogueParams< ElementScalar, ElementScalar, ElementAccumulator, ElementCompute, decltype(dummy_C), decltype(RefComputeOut), decltype(Bias), decltype(dummy_Aux), decltype(dummy_Valpha), decltype(dummy_Vbeta), ActivationFunctor, cutlass::plus<ElementCompute>, PerColBias> epilogue_params{ alpha, dummy_beta, dummy_C, RefComputeOut, Bias, dummy_Aux, dummy_Valpha, dummy_Vbeta }; cutlass::reference::host::Gemm3x(mainloop_params, epilogue_params); cutlass::NumericConverter<ElementCompute, ElementC, Epilogue::ThreadEpilogueOp::kRound> source_converter; cutlass::NumericConverter<ElementD, ElementCompute, Epilogue::ThreadEpilogueOp::kRound> destination_converter; cutlass::multiplies<ElementCompute> mul; // Compute broadcast operations atop the reference #pragma omp parallel for collapse(3) for (int64_t l = 0; l < cute::size<2>(A.layout()); ++l) { for (int64_t m = 0; m < cute::size<0>(A.layout()); ++m) { for (int64_t n = 0; n < cute::size<0>(B.layout()); ++n) { ElementCompute intermediate = RefComputeOut(m, n, l); // Apply BinaryOp0, if needed if constexpr (IsBinaryOp0Enabled) { typename Epilogue::ThreadEpilogueOp::BinaryOp0 bin0; ElementCompute converted_source = source_converter(C0(m, n, l)); intermediate = bin0(intermediate, mul(beta, converted_source)); } // Apply BinaryOp1, if needed if constexpr (IsBinaryOp1Enabled) { typename 
Epilogue::ThreadEpilogueOp::BinaryOp1 bin1; ElementCompute converted_source = source_converter(C1(m, n, l)); intermediate = bin1(intermediate, mul(beta, converted_source)); } // Apply UnaryOp, if needed if constexpr (IsUnaryOpEnabled) { typename Epilogue::ThreadEpilogueOp::UnaryOp unary; intermediate = unary(intermediate); } D(m, n, l) = destination_converter(intermediate); } } } return compare_reference(problem_shape_MNKL, alpha, beta, use_bias); } /// Executes one test bool run( ProblemShapeType problem_size, ElementScalar alpha = ElementScalar(1), ElementScalar beta = ElementScalar(0), bool profiling = false, int iterations = 20, bool use_bias = true) { // Fail test if insufficient CUDA device if (!impl_.sufficient()) { std::cout << "Test failed due to insufficient CUDA device." << std::endl; return false; } // // Initialize the GEMM operator // typename Gemm::Arguments arguments; cutlass::KernelHardwareInfo hw_info; hw_info.device_id = 0; if (not profiling) { impl_.sm_count = std::min(impl_.MaxSmCount, cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id)); hw_info.sm_count = impl_.sm_count; } else { impl_.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id); hw_info.sm_count = impl_.sm_count; } /// Initializes data structures /// A/B/C0/D Tensor initialize(problem_size); initialize_bias(problem_size); if constexpr (IsBinaryOp1Enabled) { initialize_c1(problem_size); } arguments = typename Gemm::Arguments{ cutlass::gemm::GemmUniversalMode::kGemm, problem_size, { impl_.collective_mma_inputs.tensor_A.device_data(), impl_.collective_mma_inputs.stride_a, impl_.collective_mma_inputs.tensor_B.device_data(), impl_.collective_mma_inputs.stride_b, impl_.mma_promotion_interval }, { // Epilogue arguments { alpha, beta }, // ThreadOp arguments impl_.collective_epilogue.stride_c, impl_.collective_epilogue.tensor_D.device_data(), impl_.collective_epilogue.stride_d, use_bias ? 
bias.device_data() : nullptr, impl_.collective_epilogue.tensor_C.device_data(), tensor_C1.device_data() }, // Epilogue arguments end hw_info }; Gemm gemm_op; size_t workspace_size = Gemm::get_workspace_size(arguments); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); cutlass::Status status = gemm_op.can_implement(arguments); if (status != cutlass::Status::kSuccess) { cudaError_t error = cudaGetLastError(); std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n"; return true; } // // Run the GEMM // if (profiling) { return impl_.profile(problem_size, iterations, gemm_op, arguments, workspace); } else { cudaError_t result; status = gemm_op.initialize(arguments, workspace.get()); status = gemm_op.run(); result = cudaDeviceSynchronize(); if (result != cudaSuccess) { EXPECT_EQ(result, cudaSuccess) << "Error at Kernel Sync."; return false; } EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Verify // bool passed = this->verify(problem_size, alpha, beta, use_bias); if (!passed) { std::cout << "Error : Failed : with alpha: " << float(alpha) << ", beta: " << float(beta) << ", use_bias: " << use_bias << "\n"; } return passed; } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Gemm> bool TestAllTensorBroadcast(bool use_bias=true) { using ElementScalar = typename Gemm::GemmKernel::CollectiveEpilogue::ElementScalar; using ProblemShapeType = typename Gemm::GemmKernel::ProblemShape; int max_alignment = std::max(Gemm::kAlignmentA, Gemm::kAlignmentB); std::vector<int> problem_size_m = {max_alignment, 512 - 3 * max_alignment}; std::vector<int> problem_size_n = {max_alignment, 512 - 2 * max_alignment}; if constexpr (cute::is_same_v<typename Gemm::GemmKernel::DispatchPolicy::Schedule, cutlass::gemm::KernelTmaWarpSpecializedPingpong>) { problem_size_m.push_back(768); problem_size_n.push_back(768); } constexpr int Stages = Gemm::GemmKernel::DispatchPolicy::Stages; constexpr int TileShapeK = cute::size<2>(typename Gemm::GemmKernel::TileShape{}); std::vector<int> problem_size_k = {max_alignment, TileShapeK * (Stages + 1) - max_alignment}; Testbed3xTensorBroadcast<Gemm> testbed; bool passed = true; for (int m : problem_size_m) { for (int n : problem_size_n) { for (int k : problem_size_k) { ProblemShapeType problem_size; if constexpr (cute::rank(ProblemShapeType{}) == 4) { problem_size = ProblemShapeType{m, n, k, /* l */ 1}; } else { problem_size = ProblemShapeType{m, n, k}; } for (bool use_bias : {true, false}) { passed = testbed.run( problem_size, cutlass::from_real<ElementScalar>(1), cutlass::from_real<ElementScalar>(1), false, // profiling 20, // iterations use_bias ); if (!passed) { return false; } } } } } if constexpr (cute::rank(ProblemShapeType{}) == 4) { auto problem_size = ProblemShapeType{256 + max_alignment, 256 + max_alignment, 160 + max_alignment, /* l */ 3}; passed = testbed.run( problem_size, cutlass::from_real<ElementScalar>(1), cutlass::from_real<ElementScalar>(1), false, // profiling 20 // iterations ); if (!passed) { return false; } } return passed; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace gemm } // namespace test /////////////////////////////////////////////////////////////////////////////////////////////////
test/unit/gemm/device/gemm_testbed_3x_tensor_broadcast.hpp/0
{ "file_path": "test/unit/gemm/device/gemm_testbed_3x_tensor_broadcast.hpp", "repo_id": "test", "token_count": 7979 }
58
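//
// [Illustrative sketch -- not part of the original gemm_testbed_3x_tensor_broadcast.hpp]
// The verify() routine above checks, per output element, the chain
//   Activation(alpha * acc + bias) -> BinaryOp0 with beta*C0 -> BinaryOp1 with beta*C1 -> UnaryOp.
// The scalar function below restates that chain with plain float and caller-supplied functors
// so the intended math is easy to read in isolation; the function name and parameters are
// hypothetical, and the NumericConverter round-trips performed by the real testbed are omitted.
//

template <class Activation, class BinaryOp0, class BinaryOp1, class UnaryOp>
float reference_broadcast_epilogue_element(
    float accumulator,          // A @ B accumulator for one (m, n, l) coordinate
    float bias,                 // per-row (or per-column) bias element
    float c0,                   // element of source tensor C0
    float c1,                   // element of source tensor C1
    float alpha,
    float beta,
    Activation act, BinaryOp0 bin0, BinaryOp1 bin1, UnaryOp unary,
    bool binary_op0_enabled, bool binary_op1_enabled, bool unary_op_enabled) {

  // Corresponds to RefComputeOut produced by the Gemm3x host reference above.
  float intermediate = act(alpha * accumulator + bias);

  if (binary_op0_enabled) {
    intermediate = bin0(intermediate, beta * c0);
  }
  if (binary_op1_enabled) {
    intermediate = bin1(intermediate, beta * c1);
  }
  if (unary_op_enabled) {
    intermediate = unary(intermediate);
  }

  // The testbed converts this value to ElementD before comparing against the kernel output.
  return intermediate;
}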
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for grouped Rank2K interface */ #pragma once #include <fstream> #include <iostream> #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/device_kernel.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/rank_2k_grouped.h" #include "cutlass/gemm/kernel/default_rank_2k_grouped.h" #include "cutlass/gemm/device/rank_2k_grouped.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/rank_2k_complex.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/tensor_view_io.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Rank2K> struct TestbedGrouped { // // Type definitions // using ElementA = typename Rank2K::ElementA; using ElementB = typename Rank2K::ElementB; using ElementC = typename Rank2K::ElementC; using ElementAccumulator = typename Rank2K::ElementAccumulator; using EpilogueOutputOp = typename Rank2K::EpilogueOutputOp; using ElementCompute = typename EpilogueOutputOp::ElementCompute; using LayoutA = typename Rank2K::LayoutA; using LayoutB = typename Rank2K::LayoutB; using LayoutC = typename Rank2K::LayoutC; using MatrixCoord = typename LayoutC::TensorCoord; // // Data members // /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint32_t seed; int problem_count; std::vector<cutlass::gemm::GemmCoord> problem_sizes_host; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device; std::vector<int64_t> offset_A; std::vector<int64_t> offset_B; std::vector<int64_t> offset_C; std::vector<int64_t> offset_D; std::vector<int64_t> lda_host; std::vector<int64_t> ldb_host; std::vector<int64_t> ldc_host; std::vector<int64_t> ldd_host; cutlass::DeviceAllocation<int64_t> lda; cutlass::DeviceAllocation<int64_t> ldb; cutlass::DeviceAllocation<int64_t> ldc; cutlass::DeviceAllocation<int64_t> ldd; cutlass::DeviceAllocation<ElementA> block_A; cutlass::DeviceAllocation<ElementB> block_B; cutlass::DeviceAllocation<ElementC> block_C; cutlass::DeviceAllocation<ElementC> block_D; cutlass::DeviceAllocation<ElementA *> ptr_A; cutlass::DeviceAllocation<ElementB *> ptr_B; cutlass::DeviceAllocation<ElementC *> ptr_C; cutlass::DeviceAllocation<ElementC *> ptr_D; // // Methods // TestbedGrouped( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint32_t seed_ = 3080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint32_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Rank2K::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { 
if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillRandomUniform( view, seed, scope_max, scope_min, 0); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential( view.data(), view.capacity()); } else { // no fill - remain zero } return true; } /// Initializes data structures void initialize() { // // Choose random problem sizes // // construct a few problems of random sizes srand(seed); int64_t total_elements_A = 0; int64_t total_elements_B = 0; int64_t total_elements_C = 0; int64_t total_elements_D = 0; lda_host.resize(problem_count); ldb_host.resize(problem_count); ldc_host.resize(problem_count); ldd_host.resize(problem_count); problem_sizes_host.clear(); problem_sizes_host.resize(problem_count); for (int32_t i = 0; i < problem_count; ++i) { auto N = 8 * (rand() % 64) + 24; auto K = 8 * (rand() % 64) + 24; cutlass::gemm::GemmCoord problem(N, N, K); if (!i) { problem = cutlass::gemm::GemmCoord(16, 16, 8); } problem_sizes_host.at(i) = problem; lda_host.at(i) = LayoutA::packed({problem.n(), problem.k()}).stride(0); ldb_host.at(i) = LayoutB::packed({problem.n(), problem.k()}).stride(0); ldc_host.at(i) = LayoutC::packed({problem.n(), problem.n()}).stride(0); ldd_host.at(i) = LayoutC::packed({problem.n(), problem.n()}).stride(0); offset_A.push_back(total_elements_A); offset_B.push_back(total_elements_B); offset_C.push_back(total_elements_C); offset_D.push_back(total_elements_D); int64_t elements_A = problem.n() * problem.k(); int64_t elements_B = problem.n() * problem.k(); int64_t elements_C = problem.n() * problem.n(); int64_t elements_D = problem.n() * problem.n(); total_elements_A += elements_A; total_elements_B += elements_B; total_elements_C += elements_C; total_elements_D += elements_D; // Random strides between problems? 
} problem_sizes_device.reset(problem_count); problem_sizes_device.copy_from_host(problem_sizes_host.data()); lda.reset(problem_count); ldb.reset(problem_count); ldc.reset(problem_count); ldd.reset(problem_count); lda.copy_from_host(lda_host.data()); ldb.copy_from_host(ldb_host.data()); ldc.copy_from_host(ldc_host.data()); ldd.copy_from_host(ldd_host.data()); // // Assign pointers // block_A.reset(total_elements_A); block_B.reset(total_elements_B); block_C.reset(total_elements_C); block_D.reset(total_elements_D); std::vector<ElementA *> ptr_A_host(problem_count); std::vector<ElementB *> ptr_B_host(problem_count); std::vector<ElementC *> ptr_C_host(problem_count); std::vector<ElementC *> ptr_D_host(problem_count); for (int32_t i = 0; i < problem_count; ++i) { ptr_A_host.at(i) = block_A.get() + offset_A.at(i); ptr_B_host.at(i) = block_B.get() + offset_B.at(i); ptr_C_host.at(i) = block_C.get() + offset_C.at(i); ptr_D_host.at(i) = block_D.get() + offset_D.at(i); } ptr_A.reset(problem_count); ptr_A.copy_from_host(ptr_A_host.data()); ptr_B.reset(problem_count); ptr_B.copy_from_host(ptr_B_host.data()); ptr_C.reset(problem_count); ptr_C.copy_from_host(ptr_C_host.data()); ptr_D.reset(problem_count); ptr_D.copy_from_host(ptr_D_host.data()); // // Initialize the problems of the workspace // for (int32_t i = 0; i < problem_count; ++i) { cutlass::gemm::GemmCoord problem = problem_sizes_host.at(i); LayoutA layout_A(lda_host.at(i)); LayoutB layout_B(ldb_host.at(i)); LayoutC layout_C(ldc_host.at(i)); LayoutC layout_D(ldd_host.at(i)); MatrixCoord extent_A{problem.n(), problem.k()}; MatrixCoord extent_B{problem.n(), problem.k()}; MatrixCoord extent_C{problem.n(), problem.n()}; std::vector<ElementA> matrix_A(layout_A.capacity(extent_A)); std::vector<ElementB> matrix_B(layout_B.capacity(extent_B)); std::vector<ElementC> matrix_C(layout_C.capacity(extent_C)); std::vector<ElementC> matrix_D(layout_D.capacity(extent_C)); initialize_tensor(cutlass::TensorView<ElementA, LayoutA>(matrix_A.data(), layout_A, extent_A), init_A, seed * 2021); initialize_tensor(cutlass::TensorView<ElementB, LayoutB>(matrix_B.data(), layout_B, extent_B), init_B, seed * 2022); initialize_tensor(cutlass::TensorView<ElementC, LayoutC>(matrix_C.data(), layout_C, extent_C), init_C, seed * 2023); cutlass::device_memory::copy_to_device(ptr_A_host.at(i), matrix_A.data(), matrix_A.size()); cutlass::device_memory::copy_to_device(ptr_B_host.at(i), matrix_B.data(), matrix_B.size()); cutlass::device_memory::copy_to_device(ptr_C_host.at(i), matrix_C.data(), matrix_C.size()); cutlass::device_memory::copy_to_device(ptr_D_host.at(i), matrix_D.data(), matrix_D.size()); } } /// Verifies the result is a Rank2K bool verify( ElementCompute alpha, ElementCompute beta) { bool passed = true; for (int32_t i = 0; i < problem_count; ++i) { cutlass::gemm::GemmCoord problem = problem_sizes_host.at(i); LayoutA layout_A(lda_host.at(i)); LayoutB layout_B(ldb_host.at(i)); LayoutC layout_C(ldc_host.at(i)); LayoutC layout_D(ldd_host.at(i)); MatrixCoord extent_A{problem.n(), problem.k()}; MatrixCoord extent_B{problem.n(), problem.k()}; MatrixCoord extent_C{problem.n(), problem.n()}; std::vector<ElementA> matrix_A(layout_A.capacity(extent_A)); std::vector<ElementB> matrix_B(layout_B.capacity(extent_B)); std::vector<ElementC> matrix_C(layout_C.capacity(extent_C)); std::vector<ElementC> matrix_D(layout_D.capacity(extent_C)); std::vector<ElementC> matrix_Ref(layout_D.capacity(extent_C)); cutlass::device_memory::copy_to_host(matrix_A.data(), block_A.get() + offset_A.at(i), 
matrix_A.size()); cutlass::device_memory::copy_to_host(matrix_B.data(), block_B.get() + offset_B.at(i), matrix_B.size()); cutlass::device_memory::copy_to_host(matrix_C.data(), block_C.get() + offset_C.at(i), matrix_C.size()); cutlass::device_memory::copy_to_host(matrix_D.data(), block_D.get() + offset_D.at(i), matrix_D.size()); cutlass::TensorView<ElementA, LayoutA> view_A(matrix_A.data(), layout_A, extent_A); cutlass::TensorView<ElementB, LayoutB> view_B(matrix_B.data(), layout_B, extent_B); cutlass::TensorView<ElementC, LayoutC> view_C(matrix_C.data(), layout_C, extent_C); cutlass::TensorView<ElementC, LayoutC> view_D(matrix_D.data(), layout_D, extent_C); cutlass::TensorView<ElementC, LayoutC> view_Ref(matrix_Ref.data(), layout_D, extent_C); // Reference Rank2K cutlass::reference::host::Rank2KComplex< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementCompute, ElementAccumulator >( problem, alpha, view_A, Rank2K::kTransformA, view_B, Rank2K::kTransformB, beta, view_C, view_Ref, ElementAccumulator(0), Rank2K::kFillModeC, Rank2K::kBlasMode ); // Ensure that no input or output is entirely zero EXPECT_GT(cutlass::reference::host::TensorNorm(view_A), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(view_B), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(view_C), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(view_D), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(view_Ref), 0); // Compare against reference passed = cutlass::reference::host::TensorEquals(view_D, view_Ref); if (!passed) { std::ofstream file("testbed_grouped_errors.txt"); file << "problem: " << problem << " [group: " << i << "]\n" << ", alpha: " << alpha << ", beta: " << beta << "\n\n"; file << "A =\n" << view_A << "\nB =\n" << view_B << "\nC =\n" << view_C << "\n\nReference =\n" << view_Ref << "\nComputed =\n" << view_D; return passed; } } return passed; } /// Executes one test bool run( int problem_count, ElementCompute alpha = ElementCompute(1), ElementCompute beta = ElementCompute(0)) { this->problem_count = problem_count; // Initialize the problem initialize(); int threadblock_count = Rank2K::sufficient(problem_sizes_host.data(), problem_count); // Early exit if (!threadblock_count) { if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { std::cerr << "Test waived due to insufficient CUDA device resources." 
<< std::endl; } return true; } // Configure the Rank2K arguments typename EpilogueOutputOp::Params epilogue_op(alpha, beta); // Configure Rank2K arguments typename Rank2K::Arguments args( cutlass::gemm::GemmUniversalMode::kGemm, problem_sizes_device.get(), problem_count, threadblock_count, epilogue_op, ptr_A.get(), ptr_B.get(), ptr_C.get(), ptr_D.get(), lda.get(), ldb.get(), ldc.get(), ldd.get(), problem_sizes_host.data() ); // Initialize the Rank2K object Rank2K rank2k; size_t workspace_size = rank2k.get_workspace_size(args); cutlass::DeviceAllocation<uint8_t> workspace(workspace_size); cutlass::Status status = rank2k.initialize(args, workspace.get()); if (status != cutlass::Status::kSuccess) { return false; } // Run the Rank2K object status = rank2k.run(); if (status != cutlass::Status::kSuccess) { return false; } // Wait for completion cudaError_t result = cudaDeviceSynchronize(); EXPECT_EQ(result, cudaSuccess) << "Kernel execution error: " << cudaGetErrorString(result); if (result != cudaSuccess) { return false; } // Verify correctness return verify(alpha, beta); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // device } // gemm } // test /////////////////////////////////////////////////////////////////////////////////////////////////
test/unit/gemm/device/testbed_grouped_rank_2k.h/0
{ "file_path": "test/unit/gemm/device/testbed_grouped_rank_2k.h", "repo_id": "test", "token_count": 6616 }
59
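//
// [Illustrative sketch -- not part of the original testbed_grouped_rank_2k.h]
// A test translation unit typically instantiates a concrete
// cutlass::gemm::device::Rank2KGrouped type and calls TestbedGrouped<...>::run() with a
// group count and scalars. The helper below sweeps a few alpha/beta combinations for a
// given kernel type; the function name, the group count, and the alpha/beta values are
// assumptions chosen for illustration.
//

template <typename Rank2K>
bool TestAllGroupedRank2K(int problem_count = 24) {
  using ElementCompute = typename Rank2K::EpilogueOutputOp::ElementCompute;

  double alpha_values[] = {1.0, 2.0};
  double beta_values[]  = {0.0, 1.0};

  for (double alpha : alpha_values) {
    for (double beta : beta_values) {
      // A fresh testbed per (alpha, beta) keeps the device allocations scoped to one run.
      test::gemm::device::TestbedGrouped<Rank2K> testbed;
      if (!testbed.run(problem_count, ElementCompute(alpha), ElementCompute(beta))) {
        return false;
      }
    }
  }
  return true;
}

// Hypothetical usage inside a GTest case, with Rank2KGrouped standing for a fully-specified
// cutlass::gemm::device::Rank2KGrouped instantiation (element types, layouts, fill mode, arch):
//
//   TEST(SM80_Device_Rank2KGrouped_example, random_problem_sizes) {
//     EXPECT_TRUE(TestAllGroupedRank2K<Rank2KGrouped>());
//   }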
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for warp-level wmma gemm */ #include "cutlass/arch/wmma.h" #if defined(CUTLASS_ARCH_WMMA_SM70_ENABLED) #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/gemm/warp/default_mma_wmma_tensor_op.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/gemm.h" #include "testbed.h" /// Test name format: SM[arch]_warp_wmma_[alayout]_[blayout]_[clayout]_[dtype].[threadblock_shape]_[warp_shape] //////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////// f16 accumulation point wmma.mma ////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////// [START] Verifying all layouts {N,T}x{N,T}=>{N,T} for WMMA 16x16x16 [START] ////////////////////// //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// // 4 tests for {N,T}x{N,T}=>{T} TEST(SM70_warp_wmma_row_col_row_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_row_row_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_row_row_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using 
WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_col_row_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } // 4 tests for {N,T}x{N,T}=>{N} TEST(SM70_warp_wmma_row_col_col_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_row_col_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// 
wmma.mma.sync.aligned.row.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_row_col_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m16n16k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_col_col_f16, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } /////////// [END] Verifying all layouts {N,T}x{N,T}=>{N,T} for WMMA 16x16x16 [END] /////////////////////////// TEST(SM70_warp_wmma_row_col_row_f16, 64x64x16_64x64x16_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 16> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_64x64x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_64x32x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = 
cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_32x64x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 64x64x32_32x32x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f16, 128x128x16_64x64x16_16x16x16) { // Even though the test launches 128x128x16 CTA tile this test only verfies one warp // , i.e., warp_0 of size 64x64x16 out of the four warps required to cover the CTA using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<128, 128, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m32n8k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f16, 32x8x16_32x8x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, 
ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<32, 8, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m8n32k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f16, 8x32x16_8x32x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<8, 32, 16> >().run(); } //////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.col.row.m8n32k16.f16.f16 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_col_row_row_f16, 8x32x16_8x32x16_8x32x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<8, 32, 16> >().run(); } TEST(SM70_warp_wmma_col_row_row_f16, 32x8x16_32x8x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<32, 8, 16> >().run(); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////// f32 accumulation point wmma.mma ////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m16n16k16.f32.f32 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f32, 16x16x16_16x16x16_16x16x16) { // Threadblock and warp with just one native WMMA operation (most 
basic unit test) using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >().run(); } TEST(SM70_warp_wmma_row_col_row_f32, 64x64x16_64x64x16_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 16> >().run(); } TEST(SM70_warp_wmma_row_col_row_f32, 64x64x32_64x64x32_16x16x16) { using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<64, 64, 32> >().run(); } TEST(SM70_warp_wmma_row_col_row_f32, 128x128x16_64x64x16_16x16x16) { // Even though the test launches 128x128x16 CTA tile this test only verfies one warp // , i.e., warp_0 of size 64x64x16 out of the four warps required to cover the CTA using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<128, 128, 16> >().run(); } ///////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m32n8k16.f32.f32 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f32, 32x8x16_32x8x16_32x8x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename 
cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<32, 8, 16> >().run(); } ///////////////////////////////////////////////////////////// /// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype /// wmma.mma.sync.aligned.row.col.m8n32k16.f32.f32 //////////////////////////////////////////////////////////// TEST(SM70_warp_wmma_row_col_row_f32, 8x32x16_8x32x16_8x32x16) { // Threadblock and warp with just one native WMMA operation (most basic unit test) using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using WmmaTensorOp = typename cutlass::gemm::warp::DefaultMmaTensorOpWmma< WarpShape, InstructionShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC>::Type; test::gemm::warp::Testbed<WmmaTensorOp, cutlass::gemm::GemmShape<8, 32, 16> >().run(); } #endif //CUTLASS_ARCH_WMMA_SM70_ENABLED
test/unit/gemm/warp/wmma_sm70.cu/0
{ "file_path": "test/unit/gemm/warp/wmma_sm70.cu", "repo_id": "test", "token_count": 9424 }
60
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief CUTLASS Library is an object-oriented approach to managing operations implemented by CUTLASS. 
Generally, description - compile-time constant parameters used to instantiate an operation configuration - runtime parameters with computationally expensive initialization arguments - runtime parameters that may be passed to an initialized operation with low computational overhead */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/complex.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/mma.h" #include "cutlass/layout/matrix.h" #include "cutlass/library/library.h" #include "cutlass/library/arch_mappings.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct NumericTypeMap; template <> struct NumericTypeMap<void> { static NumericTypeID const kId = NumericTypeID::kVoid; }; template <> struct NumericTypeMap<cutlass::uint1b_t> { static NumericTypeID const kId = NumericTypeID::kB1; }; template <> struct NumericTypeMap<cutlass::int4b_t> { static NumericTypeID const kId = NumericTypeID::kS4; }; template <> struct NumericTypeMap<int8_t> { static NumericTypeID const kId = NumericTypeID::kS8; }; template <> struct NumericTypeMap<int16_t> { static NumericTypeID const kId = NumericTypeID::kS16; }; template <> struct NumericTypeMap<int32_t> { static NumericTypeID const kId = NumericTypeID::kS32; }; template <> struct NumericTypeMap<int64_t> { static NumericTypeID const kId = NumericTypeID::kS64; }; template <> struct NumericTypeMap<cutlass::uint4b_t> { static NumericTypeID const kId = NumericTypeID::kU4; }; template <> struct NumericTypeMap<uint8_t> { static NumericTypeID const kId = NumericTypeID::kU8; }; template <> struct NumericTypeMap<cutlass::float_e4m3_t> { static NumericTypeID const kId = NumericTypeID::kFE4M3; }; template <> struct NumericTypeMap<cutlass::float_e5m2_t> { static NumericTypeID const kId = NumericTypeID::kFE5M2; }; template <> struct NumericTypeMap<uint16_t> { static NumericTypeID const kId = NumericTypeID::kU16; }; template <> struct NumericTypeMap<uint32_t> { static NumericTypeID const kId = NumericTypeID::kU32; }; template <> struct NumericTypeMap<uint64_t> { static NumericTypeID const kId = NumericTypeID::kU64; }; template <> struct NumericTypeMap<cutlass::half_t> { static NumericTypeID const kId = NumericTypeID::kF16; }; template <> struct NumericTypeMap<float> { static NumericTypeID const kId = NumericTypeID::kF32; }; template <> struct NumericTypeMap<double> { static NumericTypeID const kId = NumericTypeID::kF64; }; template <> struct NumericTypeMap<cutlass::complex<cutlass::half_t> > { static NumericTypeID const kId = NumericTypeID::kCF16; }; template <> struct NumericTypeMap<cutlass::complex<float> > { static NumericTypeID const kId = NumericTypeID::kCF32; }; template <> struct NumericTypeMap<cutlass::complex<double> > { static NumericTypeID const kId = NumericTypeID::kCF64; }; template <> struct NumericTypeMap<cutlass::bfloat16_t> { static NumericTypeID const kId = NumericTypeID::kBF16; }; template <> struct NumericTypeMap<cutlass::tfloat32_t> { static NumericTypeID const kId = NumericTypeID::kTF32; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct MathOperationMap { static MathOperationID const kId = MathOperationID::kInvalid; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAdd> { static MathOperationID const 
kId = MathOperationID::kMultiplyAdd; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddFastBF16> { static MathOperationID const kId = MathOperationID::kMultiplyAddFastBF16; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddFastF16> { static MathOperationID const kId = MathOperationID::kMultiplyAddFastF16; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddSaturate> { static MathOperationID const kId = MathOperationID::kMultiplyAddSaturate; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddMixedInputUpcast> { static MathOperationID const kId = MathOperationID::kMultiplyAddMixedInputUpcast; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddComplex> { static MathOperationID const kId = MathOperationID::kMultiplyAddComplex; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddGaussianComplex> { static MathOperationID const kId = MathOperationID::kMultiplyAddGaussianComplex; }; template <> struct MathOperationMap<cutlass::arch::OpXorPopc> { static MathOperationID const kId = MathOperationID::kXorPopc; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddFastF32> { static MathOperationID const kId = MathOperationID::kMultiplyAddFastF32; }; template <> struct MathOperationMap<cutlass::arch::OpMultiplyAddComplexFastF32> { static MathOperationID const kId = MathOperationID::kMultiplyAddComplexFastF32; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct LayoutMap; template <> struct LayoutMap<cutlass::layout::ColumnMajor> { static LayoutTypeID const kId = LayoutTypeID::kColumnMajor; }; template <> struct LayoutMap<cutlass::layout::RowMajor> { static LayoutTypeID const kId = LayoutTypeID::kRowMajor; }; template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<2>> { static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK2; }; template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<2>> { static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK2; }; template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<4>> { static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK4; }; template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<4>> { static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK4; }; template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<16>> { static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK16; }; template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<16>> { static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK16; }; template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<32>> { static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK32; }; template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<32>> { static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK32; }; template <> struct LayoutMap<cutlass::layout::ColumnMajorInterleaved<64>> { static LayoutTypeID const kId = LayoutTypeID::kColumnMajorInterleavedK64; }; template <> struct LayoutMap<cutlass::layout::RowMajorInterleaved<64>> { static LayoutTypeID const kId = LayoutTypeID::kRowMajorInterleavedK64; }; template <> struct LayoutMap<cutlass::layout::TensorNHWC> { static LayoutTypeID const kId = LayoutTypeID::kTensorNHWC; }; template <> struct LayoutMap<cutlass::layout::TensorNDHWC> { static LayoutTypeID const kId = LayoutTypeID::kTensorNDHWC; }; 
template <> struct LayoutMap<cutlass::layout::TensorNCxHWx<32>> { static LayoutTypeID const kId = LayoutTypeID::kTensorNC32HW32; }; template <> struct LayoutMap<cutlass::layout::TensorNCxHWx<64>> { static LayoutTypeID const kId = LayoutTypeID::kTensorNC64HW64; }; template <> struct LayoutMap<cutlass::layout::TensorCxRSKx<32>> { static LayoutTypeID const kId = LayoutTypeID::kTensorC32RSK32; }; template <> struct LayoutMap<cutlass::layout::TensorCxRSKx<64>> { static LayoutTypeID const kId = LayoutTypeID::kTensorC64RSK64; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct OpcodeClassMap; template <> struct OpcodeClassMap<arch::OpClassSimt> { static OpcodeClassID const kId = OpcodeClassID::kSimt; }; template <> struct OpcodeClassMap<arch::OpClassTensorOp> { static OpcodeClassID const kId = OpcodeClassID::kTensorOp; }; template <> struct OpcodeClassMap<arch::OpClassSparseTensorOp> { static OpcodeClassID const kId = OpcodeClassID::kSparseTensorOp; }; template <> struct OpcodeClassMap<arch::OpClassWmmaTensorOp> { static OpcodeClassID const kId = OpcodeClassID::kWmmaTensorOp; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <cutlass::ComplexTransform Transform> struct ComplexTransformMap; template <> struct ComplexTransformMap<cutlass::ComplexTransform::kNone> { static cutlass::library::ComplexTransform const kId = cutlass::library::ComplexTransform::kNone; }; template <> struct ComplexTransformMap<cutlass::ComplexTransform::kConjugate> { static cutlass::library::ComplexTransform const kId = cutlass::library::ComplexTransform::kConjugate; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <cutlass::conv::Mode T> struct ConvModeMap; template <> struct ConvModeMap<conv::Mode::kCrossCorrelation> { static ConvModeID const kId = ConvModeID::kCrossCorrelation; }; template <> struct ConvModeMap<conv::Mode::kConvolution> { static ConvModeID const kId = ConvModeID::kConvolution; }; template <cutlass::conv::Operator T> struct ConvKindMap; template <> struct ConvKindMap<conv::Operator::kFprop> { static ConvKind const kId = ConvKind::kFprop; }; template <> struct ConvKindMap<conv::Operator::kDgrad> { static ConvKind const kId = ConvKind::kDgrad; }; template <> struct ConvKindMap<conv::Operator::kWgrad> { static ConvKind const kId = ConvKind::kWgrad; }; template <cutlass::conv::IteratorAlgorithm T> struct IteratorAlgorithmMap; template <> struct IteratorAlgorithmMap<conv::IteratorAlgorithm::kAnalytic> { static IteratorAlgorithmID const kId = IteratorAlgorithmID::kAnalytic; }; template <> struct IteratorAlgorithmMap<conv::IteratorAlgorithm::kOptimized> { static IteratorAlgorithmID const kId = IteratorAlgorithmID::kOptimized; }; template <> struct IteratorAlgorithmMap<conv::IteratorAlgorithm::kFixedChannels> { static IteratorAlgorithmID const kId = IteratorAlgorithmID::kFixedChannels; }; template <> struct IteratorAlgorithmMap<conv::IteratorAlgorithm::kFewChannels> { static IteratorAlgorithmID const kId = IteratorAlgorithmID::kFewChannels; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Element, typename Layout> TensorDescription make_TensorDescription(int alignment = 1) { TensorDescription desc; desc.element = NumericTypeMap<Element>::kId; desc.layout = LayoutMap<Layout>::kId; desc.alignment = alignment; desc.log_extent_range = 
int(sizeof(typename Layout::TensorCoord::Index) - 1) * 8; desc.log_stride_range = int(sizeof(typename Layout::Stride::Index) - 1) * 8; return desc; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
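The trait maps in this header translate compile-time C++ types into the library's runtime enumerants. Below is a minimal sketch of how they might be consulted when building a TensorDescription for an operand; the function name `describe_operand_a` and the alignment of 8 are illustrative assumptions, not part of the header.

```c++
#include "cutlass/half.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/library/library.h"
// plus this header (library_internal.h) from the library source tree

namespace example {

inline cutlass::library::TensorDescription describe_operand_a() {
  using namespace cutlass::library;

  // Compile-time type -> runtime enumerant mappings.
  static_assert(NumericTypeMap<cutlass::half_t>::kId == NumericTypeID::kF16,
                "half_t maps to kF16");
  static_assert(LayoutMap<cutlass::layout::RowMajor>::kId == LayoutTypeID::kRowMajor,
                "RowMajor maps to kRowMajor");

  // Describe a row-major f16 operand with 8-element alignment.
  return make_TensorDescription<cutlass::half_t, cutlass::layout::RowMajor>(/*alignment=*/8);
}

} // namespace example
```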
tools/library/src/library_internal.h/0
{ "file_path": "tools/library/src/library_internal.h", "repo_id": "tools", "token_count": 3995 }
61
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Defines profiling functionality for convolution */ #pragma once #include <vector> #include <string> #include <memory> #include <algorithm> #include <unordered_map> // CUTLASS Library includes #include "cutlass/library/library.h" #include "cutlass/library/util.h" #include "cutlass/library/handle.h" #include "cutlass/library/manifest.h" #include "cutlass/library/singleton.h" // Profiler includes #include "options.h" #include "device_context.h" #include "operation_profiler.h" #include "performance_result.h" #include "problem_space.h" #include "reduction_operation_profiler.h" #if CUTLASS_ENABLE_CUDNN #include "cudnn_helpers.h" #endif //#if CUTLASS_ENABLE_CUDNN #include "debug.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Abstract base class for each math function class Conv2dOperationProfiler : public OperationProfiler { public: /// Problem structure obtained from problem space struct Conv2dProblem { int64_t n, h, w, c, p, q, k, r, s; int64_t groups; int64_t pad_h, pad_w; int64_t stride_h, stride_w; int64_t dilation_h, dilation_w; std::vector<uint8_t> alpha; std::vector<uint8_t> beta; library::SplitKMode split_k_mode; int64_t split_k_slices; library::ConvModeID conv_mode; library::Provider eq_gemm_provider; // convolution with parallel interleaved reduction // convolution epilogue (alpha, beta) = (1.0, 0.0) // reduction epilogue (alpha, beta) = (Conv2dProblem::alpha, Conv2dProblem::beta) std::vector<uint8_t> alpha_one; std::vector<uint8_t> beta_zero; // // Methods // /// Total number of bytes loaded int64_t 
bytes(library::ConvDescription const &operation_desc) const; /// Total number of flops computed int64_t flops(library::ConvDescription const &operation_desc) const; void set_default_output_size() { p = ((h + pad_h - r * dilation_h) / stride_h) + 1; q = ((w + pad_w - s * dilation_w) / stride_w) + 1; } // Returns equivalent gemm problem size for convolution cutlass::gemm::GemmCoord eq_gemm_size(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return cutlass::gemm::GemmCoord(int(n * p * q), int(k), int(r * s * c / groups)); case library::ConvKind::kDgrad: return cutlass::gemm::GemmCoord(int(n * h * w), int(c), int(k * r * s)); case library::ConvKind::kWgrad: return cutlass::gemm::GemmCoord(int(k), int(r * s * c), int(n * p * q)); default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns extent for tensor A std::vector<int> extent_a(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return {int(n), int(h), int(w), int(c)}; case library::ConvKind::kDgrad: return {int(n), int(p), int(q), int(k)}; case library::ConvKind::kWgrad: return {int(n), int(p), int(q), int(k)}; default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns extent for tensor B std::vector<int> extent_b(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return {int(k), int(r), int(s), int(c / groups)}; case library::ConvKind::kDgrad: return {int(k), int(r), int(s), int(c)}; case library::ConvKind::kWgrad: return {int(n), int(h), int(w), int(c)}; default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns extent for tensor C std::vector<int> extent_c(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return {int(n), int(p), int(q), int(k)}; case library::ConvKind::kDgrad: return {int(n), int(h), int(w), int(c)}; case library::ConvKind::kWgrad: return {int(k), int(r), int(s), int(c)}; default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns layout for equivalent gemm matrix A library::LayoutTypeID eq_gemm_layout_a(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return library::LayoutTypeID::kRowMajor; // TN Gemm case library::ConvKind::kDgrad: return library::LayoutTypeID::kRowMajor; // TT Gemm case library::ConvKind::kWgrad: return library::LayoutTypeID::kColumnMajor; // NT Gemm default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns layout for equivalent gemm matrix B library::LayoutTypeID eq_gemm_layout_b(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return library::LayoutTypeID::kColumnMajor; // TN Gemm case library::ConvKind::kDgrad: return library::LayoutTypeID::kRowMajor; // TT Gemm case library::ConvKind::kWgrad: return library::LayoutTypeID::kRowMajor; // NT Gemm default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns layout for equivalent gemm matrix C library::LayoutTypeID eq_gemm_layout_c(library::ConvKind const &conv_kind) const { switch (conv_kind) { // Gemm operator assumes column-major output case library::ConvKind::kFprop: case library::ConvKind::kDgrad: case library::ConvKind::kWgrad: return library::LayoutTypeID::kColumnMajor; default : throw std::runtime_error("Invalid Conv Operator (fprop, 
dgrad, wgrad)"); } } // Returns leading dimension for equivalent gemm matrix A int64_t eq_gemm_lda(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return eq_gemm_size(conv_kind).k(); case library::ConvKind::kDgrad: return eq_gemm_size(conv_kind).k(); case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).m(); default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns leading dimension for equivalent gemm matrix B int64_t eq_gemm_ldb(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: return eq_gemm_size(conv_kind).k(); case library::ConvKind::kDgrad: return eq_gemm_size(conv_kind).n(); case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).n(); default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns leading dimension for equivalent gemm matrix C int64_t eq_gemm_ldc(library::ConvKind const &conv_kind) const { switch (conv_kind) { case library::ConvKind::kFprop: case library::ConvKind::kDgrad: case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).m(); default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } }; /// Workspace used struct Conv2dWorkspace { /// Conv device allocations DeviceAllocation *A; DeviceAllocation *B; DeviceAllocation *reordered_B; DeviceAllocation *C; DeviceAllocation *Computed; DeviceAllocation *Reference; /// Library configuration and arguments for convolution operator library::Conv2dConfiguration configuration; library::ConvArguments arguments; /// Number of copies of the problem workspace which are visited sequentially during /// profiling to avoid camping in the last level cache. int problem_count; /// Buffer used for the cutlass conv2d operations' host workspace std::vector<uint8_t> host_workspace; /// Buffer used for the cutlass operations' device workspace DeviceAllocation device_workspace; /// Library configuration and arguments for reduction operator library::ReductionConfiguration reduction_configuration; library::ReductionArguments reduction_arguments; /// Buffer used for the cutlass reduction operations' host workspace std::vector<uint8_t> reduction_host_workspace; /// Host data buffers for host reference operation /// host buffer for tensor std::vector<uint8_t> host_tensor_a; /// host buffer for tensor b std::vector<uint8_t> host_tensor_b; /// host buffer for tensor c std::vector<uint8_t> host_tensor_c; // // Methods // Conv2dWorkspace() : A(nullptr), B(nullptr), reordered_B(nullptr), C(nullptr), Computed(nullptr), Reference(nullptr) {} // Set stride vector for tensor activations, filters, output void set_stride_vector(Conv2dProblem const &problem, library::ConvKind const &conv_kind, library::LayoutTypeID const &layout_a, library::LayoutTypeID const &layout_b, library::LayoutTypeID const &layout_c) { std::vector<int64_t> stride_activations; std::vector<int64_t> stride_filters; std::vector<int64_t> stride_output; // Strides for interleaved fprop if (conv_kind == library::ConvKind::kFprop && ((layout_a == library::LayoutTypeID::kTensorNC32HW32 && layout_b == library::LayoutTypeID::kTensorC32RSK32 && layout_c == library::LayoutTypeID::kTensorNC32HW32) || (layout_a == library::LayoutTypeID::kTensorNC64HW64 && layout_b == library::LayoutTypeID::kTensorC64RSK64 && layout_c == library::LayoutTypeID::kTensorNC64HW64))) { int interleave = (layout_a == library::LayoutTypeID::kTensorNC32HW32) ? 
32 : 64; stride_activations.push_back(int(problem.w) * interleave); stride_activations.push_back(int(problem.w) * int(problem.h) * interleave); stride_activations.push_back(int(problem.h) * int(problem.w) * int(problem.c)); stride_filters.push_back(int(problem.k) * interleave); stride_filters.push_back(int(problem.k) * int(problem.s) * interleave); stride_filters.push_back(int(problem.k) * int(problem.s) * int(problem.r) * interleave); stride_output.push_back(int(problem.q) * interleave); stride_output.push_back(int(problem.q) * int(problem.p) * interleave); stride_output.push_back(int(problem.q) * int(problem.p) * int(problem.k)); } else { // Strides for the rest cases stride_activations.push_back(int(problem.c)); stride_activations.push_back(int(problem.w) * int(problem.c)); stride_activations.push_back(int(problem.h) * int(problem.w) * int(problem.c)); stride_filters.push_back(int(problem.c / problem.groups)); stride_filters.push_back(int(problem.s) * int(problem.c / problem.groups)); stride_filters.push_back(int(problem.r) * int(problem.s) * int(problem.c / problem.groups)); stride_output.push_back(int(problem.k)); stride_output.push_back(int(problem.q) * int(problem.k)); stride_output.push_back(int(problem.q) * int(problem.p) * int(problem.k)); } switch (conv_kind) { case library::ConvKind::kFprop: configuration.stride_a = stride_activations; configuration.stride_b = stride_filters; configuration.stride_c = stride_output; break; case library::ConvKind::kDgrad: configuration.stride_a = stride_output; configuration.stride_b = stride_filters; configuration.stride_c = stride_activations; break; case library::ConvKind::kWgrad: configuration.stride_a = stride_output; configuration.stride_b = stride_activations; configuration.stride_c = stride_filters; break; default: throw std::runtime_error( "Invalid Conv Operator (fprop, dgrad, wgrad)"); } } }; protected: // // Data members // /// CONV problem obtained from problem space Conv2dProblem problem_; /// Device memory allocations Conv2dWorkspace conv_workspace_; /// CUTLASS parallel reduction operation to follow this* conv2d operation library::Operation const *reduction_op_; public: // // Methods // /// Ctor Conv2dOperationProfiler(Options const &options); /// Destructor virtual ~Conv2dOperationProfiler(); Conv2dProblem const& problem() const { return problem_; } /// Prints usage statement for the math function virtual void print_usage(std::ostream &out) const; /// Prints examples virtual void print_examples(std::ostream &out) const; /// Extracts the problem dimensions virtual Status initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Initializes workspace virtual Status initialize_workspace( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Verifies CUTLASS against references virtual bool verify_cutlass( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Measures performance results virtual bool profile( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const 
&problem); protected: /// Method to profile an initialized CUTLASS operation virtual Status profile_cutlass_( double &runtime, Options const &options, library::Operation const *operation, void *arguments, void *host_workspace, void *device_workspace); /// Initialize reduction problem dimensions and library::Operation bool initialize_reduction_configuration_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Initializes the performance result void initialize_result_( PerformanceResult &result, Options const &options, library::ConvDescription const &operation_desc, ProblemSpace const &problem_space); /// Verifies CUTLASS against host reference bool verify_with_host_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); /// Verifies CUTLASS against device reference bool verify_with_device_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); #if CUTLASS_ENABLE_CUDNN /// Verifies CUTLASS against cudnn reference bool verify_with_cudnn_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem); #endif //#if CUTLASS_ENABLE_CUDNN }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
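The eq_gemm_size() mapping above is the heart of the implicit-GEMM formulation used by this profiler. As a concrete illustration (the problem sizes below are made up for this example, not taken from the profiler), a small fprop problem maps to the following GEMM extents:

```c++
#include <cstdio>

int main() {
  // Hypothetical fprop problem: N=1, H=W=56, C=64, K=128, R=S=3,
  // pad=1, stride=1, dilation=1, groups=1  ->  P=Q=56.
  int N = 1, P = 56, Q = 56, C = 64, K = 128, R = 3, S = 3, groups = 1;

  // kFprop: GemmCoord(n*p*q, k, r*s*c/groups)
  int gemm_m = N * P * Q;           // 3136
  int gemm_n = K;                   // 128
  int gemm_k = R * S * C / groups;  // 576

  std::printf("fprop implicit GEMM extents: M=%d N=%d K=%d\n", gemm_m, gemm_n, gemm_k);
  return 0;
}
```

Dgrad and wgrad permute which tensor plays the role of the A, B, and C operands, as encoded by extent_a/extent_b/extent_c above.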
tools/profiler/include/cutlass/profiler/conv2d_operation_profiler.h/0
{ "file_path": "tools/profiler/include/cutlass/profiler/conv2d_operation_profiler.h", "repo_id": "tools", "token_count": 6686 }
62
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <stdio.h> #include "cutlass/cutlass.h" /** * \file * \brief C++ interface to dump fragments and shared memory contents for * debugging. */ namespace cutlass { namespace debug { /****************************************************************************** * Dump the fragments ******************************************************************************/ /// The first N threads dump the first M elements from their fragments with a /// stride of S elements. If N is not specified, dump the data of all the /// threads. If M is not specified, dump all the elements of the fragment. 
template <typename Fragment> CUTLASS_DEVICE void dump_fragment(Fragment const& frag, int N = 0, int M = 0, int S = 1) { int total_threads = blockDim.x * blockDim.y * blockDim.z; int block_id = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int thread_id = (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; if (N < 0 || N > total_threads) { if (thread_id == 0 && block_id == 0) printf("Thread number N = %d should between [1, %d].\n", N, total_threads); __syncthreads(); return; } int total_elements = int(frag.size()); if (M < 0 || M > total_elements) { if (thread_id == 0 && block_id == 0) printf("Element number M = %d should between [1, %d].\n", M, total_elements); __syncthreads(); return; } if (N == 0) N = total_threads; if (M == 0) M = total_elements; if (S < 1 || S > M) { if (thread_id == 0 && block_id == 0) printf("Stride S = %d should between [1, %d].\n", S, M); __syncthreads(); return; } if (thread_id == 0 && block_id == 0) printf("\n*******************Dumping the fragments*******************\n\n"); CUTLASS_PRAGMA_NO_UNROLL for (int tid = 0; tid < N; ++tid) { if (tid == thread_id) { printf("TB%d W%d T%d: ", block_id, tid / 32, tid & 31); CUTLASS_PRAGMA_NO_UNROLL for (int i = 0; i < M; i += S) { printf("%.0f ", float(typename Fragment::value_type(frag[i]))); } printf("\n"); } __syncthreads(); } if (thread_id == 0 && block_id == 0) printf("\n***********************************************************\n\n"); __syncthreads(); return; } /****************************************************************************** * Dump the shared memory ******************************************************************************/ #define SHMEM_ROW_SIZE 128 /// Dump the shared memory contents. ptr is the begin address, size specifies /// the number of elements that need to be dumped, and S specifies the stride. template <typename Element> CUTLASS_DEVICE void dump_shmem(Element const* ptr, size_t size, int S = 1) { int block_id = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int thread_id = (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; if (ptr == nullptr) { if (thread_id == 0 && block_id == 0) printf("ptr is null.\n"); __syncthreads(); return; } if (size < 1) { if (thread_id == 0 && block_id == 0) printf("Element size is less than 1\n"); __syncthreads(); return; } int row_elements = SHMEM_ROW_SIZE / sizeof(Element); if (S < 1 || S > row_elements) { if (thread_id == 0 && block_id == 0) printf("Stride S = %d should between [1, %d].\n", S, row_elements); __syncthreads(); return; } __syncthreads(); if (thread_id == 0) printf("\n********Dumping the shared memory of TB %d*******\n\n", block_id); if (thread_id == 0) { for (int i = 0; i < size; i += row_elements) { for (int j = 0; j < row_elements; j += S) { printf("%.0f ", float(ptr[i + j])); } printf("\n"); } } if (thread_id == 0) printf("\n***********************************************************\n\n"); __syncthreads(); return; } } // namespace debug } // namespace cutlass
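A sketch of how dump_fragment and dump_shmem might be called from inside a kernel while debugging. The fragment contents, buffer sizes, and indexing are placeholders chosen only for illustration; the caller is assumed to supply at least blockDim.x * 8 input elements.

```c++
#include "cutlass/array.h"
#include "cutlass/util/device_dump.h"

__global__ void debug_dump_kernel(float const *gmem_in) {
  // Placeholder per-thread fragment and shared-memory staging buffer.
  __shared__ float smem[256];
  cutlass::Array<float, 8> frag;

  int tid = threadIdx.x;
  CUTLASS_PRAGMA_UNROLL
  for (int i = 0; i < 8; ++i) {
    frag[i] = gmem_in[tid * 8 + i];
    smem[(tid * 8 + i) % 256] = frag[i];
  }
  __syncthreads();

  // Dump the first 4 fragment elements held by each of the first 32 threads.
  cutlass::debug::dump_fragment(frag, /*N=*/32, /*M=*/4);

  // Dump the first 128 shared-memory elements with unit stride.
  cutlass::debug::dump_shmem(smem, 128);
}
```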
tools/util/include/cutlass/util/device_dump.h/0
{ "file_path": "tools/util/include/cutlass/util/device_dump.h", "repo_id": "tools", "token_count": 2057 }
63
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once /*! \file \brief HostTensor contributes management for both host and device memory. HostTensor allocates host and device memory upon construction. Basic element-wise operations on host memory synchronize device memory automatically. Explicit copy operations provide abstractions for CUDA memcpy operations. Call {host, device}_{data, ref, view}() for accessing host or device memory. See cutlass/tensor_ref.h and cutlass/tensor_view.h for more details. 
*/ #include <vector> #include "cutlass/cutlass.h" #include "cutlass/tensor_ref_planar_complex.h" #include "cutlass/tensor_view_planar_complex.h" #include "device_memory.h" namespace cutlass { /////////////////////////////////////////////////////////////////////////////////////////////////// /// Host tensor template < /// Data type of element stored within tensor (concept: NumericType) typename Element_, /// Defines a mapping from logical coordinate to linear memory (concept: Layout) typename Layout_ > class HostTensorPlanarComplex { public: /// Data type of individual access using Element = Element_; /// Mapping function from logical coordinate to linear memory using Layout = Layout_; /// Logical rank of tensor index space static int const kRank = Layout::kRank; /// Index type using Index = typename Layout::Index; /// Long index used for pointer offsets using LongIndex = typename Layout::LongIndex; /// Coordinate in logical tensor space using TensorCoord = typename Layout::TensorCoord; /// Layout's stride vector using Stride = typename Layout::Stride; /// Tensor reference to device memory using TensorRef = TensorRefPlanarComplex<Element, Layout>; /// Tensor reference to constant device memory using ConstTensorRef = typename TensorRef::ConstTensorRef; /// Tensor reference to device memory using TensorView = TensorViewPlanarComplex<Element, Layout>; /// Tensor reference to constant device memory using ConstTensorView = typename TensorView::ConstTensorView; /// Reference to element in tensor using Reference = typename TensorRef::Reference; /// Constant reference to element in tensor using ConstReference = typename ConstTensorRef::Reference; private: // // Data members // /// Extent of tensor in logical dimensions TensorCoord extent_; /// Layout object Layout layout_; /// Host-side memory allocation std::vector<Element> host_; /// Device-side memory device_memory::allocation<Element> device_; public: // // Device and Host Methods // /// Default constructor HostTensorPlanarComplex() {} /// Constructs a tensor given an extent. Assumes a packed layout HostTensorPlanarComplex( TensorCoord const &extent, bool device_backed = true ) { this->reset(extent, Layout::packed(extent), device_backed); } /// Constructs a tensor given an extent and layout HostTensorPlanarComplex( TensorCoord const &extent, Layout const &layout, bool device_backed = true ) { this->reset(extent, layout, device_backed); } ~HostTensorPlanarComplex() { } /// Clears the HostTensor allocation to size/capacity = 0 void reset() { extent_ = TensorCoord(); layout_ = Layout::packed(extent_); host_.clear(); device_.reset(); } /// Resizes internal memory allocations without affecting layout or extent void reserve( size_t count, ///< size of tensor in elements bool device_backed_ = true) { ///< if true, device memory is also allocated device_.reset(); host_.clear(); host_.resize(count * 2); // Allocate memory Element* device_memory = nullptr; if (device_backed_) { device_memory = device_memory::allocate<Element>(count * 2); } device_.reset(device_memory, device_backed_ ? count * 2 : 0); } /// Updates the extent and layout of the HostTensor. Allocates memory according to the new /// extent and layout. void reset( TensorCoord const &extent, ///< extent of logical tensor Layout const &layout, ///< layout object of tensor bool device_backed_ = true) { ///< if true, device memory is also allocated. 
extent_ = extent; layout_ = layout; reserve(size_t(layout_.capacity(extent_)), device_backed_); } /// Updates the extent and layout of the HostTensor. Allocates memory according to the new /// extent and layout. Assumes a packed tensor configuration. void reset( TensorCoord const &extent, ///< extent of logical tensor bool device_backed_ = true) { ///< if true, device memory is also allocated. reset(extent, Layout::packed(extent), device_backed_); } /// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity. /// To force allocation, call reset(). void resize( TensorCoord const &extent, ///< extent of logical tensor Layout const &layout, ///< layout object of tensor bool device_backed_ = true) { ///< if true, device memory is also allocated. extent_ = extent; layout_ = layout; LongIndex new_size = size_t(layout_.capacity(extent_)); if (static_cast<decltype(host_.size())>(new_size * 2) > host_.size()) { reserve(new_size); } } /// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity. /// To force allocation, call reset(). Note, this form of resize() assumes a packed tensor configuration. void resize( TensorCoord const &extent, ///< extent of logical tensor bool device_backed_ = true) { ///< if true, device memory is also allocated. resize(extent, Layout::packed(extent), device_backed_); } /// Returns the number of elements stored in the host tensor size_t size() const { return host_.size() / 2; } /// Returns the logical capacity based on extent and layout. May differ from size(). LongIndex capacity() const { return layout_.capacity(extent_); } /// Stride between real and imaginary parts LongIndex imaginary_stride() const { return host_.size() / 2; } /// Gets pointer to host data Element * host_data() { return host_.data(); } /// Gets pointer to host data imaginary part Element * host_data_imag() { return host_.data() + imaginary_stride(); } /// Gets pointer to host data with a pointer offset Element * host_data_ptr_offset(LongIndex ptr_element_offset) { return host_data() + ptr_element_offset; } /// Gets pointer to host data with a pointer offset Element * host_data_imag_ptr_offset(LongIndex ptr_element_offset) { return host_data_imag() + ptr_element_offset; } /// Gets a reference to an element in host memory Reference host_data(LongIndex idx) { return PlanarComplexReference<Element>(host_data() + idx, host_data_imag() + idx); } /// Gets pointer to host data Element const * host_data() const { return host_.data(); } /// Gets pointer to host data imaginary part Element const * host_data_imag() const { return host_.data() + imaginary_stride(); } /// Gets a constant reference to an element in host memory ConstReference host_data(LongIndex idx) const { return PlanarComplexReference<Element const>(host_data() + idx, host_data_imag() + idx); } /// Gets pointer to device data Element * device_data() { return device_.get(); } /// Gets pointer to device data with a pointer offset Element * device_data_ptr_offset(LongIndex ptr_element_offset) { return device_.get() + ptr_element_offset; } /// Gets pointer to device data Element const * device_data() const { return device_.get(); } /// Gets pointer to device data with a pointer offset Element const * device_data_ptr_offset(LongIndex ptr_element_offset) const { return device_.get() + ptr_element_offset; } /// Gets a pointer to the device data imaginary part Element * device_data_imag() { return device_.get() + imaginary_stride(); } /// Accesses the tensor 
reference pointing to data TensorRef host_ref(LongIndex ptr_element_offset=0) { return TensorRef(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride()); } /// Returns a tensor reference to the real part of the tensor cutlass::TensorRef<Element, Layout> host_ref_real() { return cutlass::TensorRef<Element, Layout>(host_data(), layout_); } /// Returns a tensor reference to the real part of the tensor cutlass::TensorRef<Element, Layout> host_ref_imag() { return cutlass::TensorRef<Element, Layout>(host_data_ptr_offset(imaginary_stride()), layout_); } /// Accesses the tensor reference pointing to data ConstTensorRef host_ref(LongIndex ptr_element_offset=0) const { return ConstTensorRef(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride()); } /// Accesses the tensor reference pointing to data TensorRef device_ref(LongIndex ptr_element_offset=0) { return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride()); } /// Accesses the tensor reference pointing to data ConstTensorRef device_ref(LongIndex ptr_element_offset=0) const { return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride()); } /// Returns a tensor reference to the real part of the tensor cutlass::TensorRef<Element, Layout> device_ref_real() { return cutlass::TensorRef<Element, Layout>(device_data(), layout_); } /// Returns a tensor reference to the real part of the tensor cutlass::TensorRef<Element, Layout> device_ref_imag() { return cutlass::TensorRef<Element, Layout>(device_data_ptr_offset(imaginary_stride()), layout_); } /// Accesses the tensor reference pointing to data TensorView host_view(LongIndex ptr_element_offset=0) { return TensorView(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_); } /// Accesses the tensor reference pointing to data ConstTensorView host_view(LongIndex ptr_element_offset=0) const { return ConstTensorView(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_); } /// Accesses the tensor reference pointing to data cutlass::TensorView<Element, Layout> host_view_real() { return cutlass::TensorView<Element, Layout>(host_data(), layout_, extent_); } /// Accesses the tensor reference pointing to data cutlass::TensorView<Element, Layout> host_view_imag() { return cutlass::TensorView<Element, Layout>(host_data_ptr_offset(imaginary_stride()), layout_, extent_); } /// Accesses the tensor reference pointing to data TensorView device_view(LongIndex ptr_element_offset=0) { return TensorView(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_); } /// Accesses the tensor reference pointing to data ConstTensorView device_view(LongIndex ptr_element_offset=0) const { return ConstTensorView(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_); } /// Accesses the tensor reference pointing to data cutlass::TensorView<Element, Layout> device_view_real() { return cutlass::TensorView<Element, Layout>(device_data(), layout_, extent_); } /// Accesses the tensor reference pointing to data cutlass::TensorView<Element, Layout> device_view_imag() { return cutlass::TensorView<Element, Layout>(device_data_ptr_offset(imaginary_stride()), layout_, extent_); } /// Returns true if device memory is allocated bool device_backed() const { return (device_.get() == nullptr) ? 
false : true; } /// Returns the layout object Layout layout() const { return layout_; } /// Returns the layout object's stride vector Stride stride() const { return layout_.stride(); } /// Returns the layout object's stride in a given physical dimension Index stride(int dim) const { return layout_.stride().at(dim); } /// Computes the offset of an index from the origin of the tensor LongIndex offset(TensorCoord const& coord) const { return layout_(coord); } /// Returns a reference to the element at the logical Coord in host memory Reference at(TensorCoord const& coord) { return host_data(offset(coord)); } /// Returns a const reference to the element at the logical Coord in host memory ConstReference at(TensorCoord const& coord) const { return host_data(offset(coord)); } /// Returns the extent of the tensor TensorCoord extent() const { return extent_; } /// Returns the extent of the tensor TensorCoord & extent() { return extent_; } /// Copies data from device to host void sync_host() { if (device_backed()) { device_memory::copy_to_host( host_data(), device_data(), imaginary_stride() * 2); } } /// Copies data from host to device void sync_device() { if (device_backed()) { device_memory::copy_to_device( device_data(), host_data(), imaginary_stride() * 2); } } /// Copy data from a caller-supplied device pointer into host memory. void copy_in_device_to_host( Element const* ptr_device_real, ///< source device memory Element const* ptr_device_imag, ///< source device memory LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_to_host( host_data(), ptr_device_real, count); device_memory::copy_to_host( host_data_imag(), ptr_device_imag, count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_in_device_to_device( Element const* ptr_device_real, ///< source device memory Element const* ptr_device_imag, ///< source device memory LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_device_to_device( device_data(), ptr_device_real, count); device_memory::copy_device_to_device( device_data_imag(), ptr_device_imag, count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_in_host_to_device( Element const* ptr_host_real, ///< source host memory Element const* ptr_host_imag, ///< source host memory LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_to_device( device_data(), ptr_host_real, count); device_memory::copy_to_device( device_data_imag(), ptr_host_imag, count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_in_host_to_host( Element const* ptr_host_real, ///< source host memory Element const* ptr_host_imag, ///< source host memory LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten. 
if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_host_to_host( host_data(), ptr_host_real, count); device_memory::copy_host_to_host( host_data_imag(), ptr_host_imag, count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_out_device_to_host( Element * ptr_host_real, ///< source device memory Element * ptr_host_imag, ///< source device memory LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_to_host( ptr_host_real, device_data(), count); device_memory::copy_to_host( ptr_host_imag, device_data_imag(), count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_out_device_to_device( Element * ptr_device_real, ///< source device memory Element * ptr_device_imag, ///< source device memory LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_device_to_device( ptr_device_real, device_data(), count); device_memory::copy_device_to_device( ptr_device_imag, device_data_imag(), count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_out_host_to_device( Element * ptr_device_real, ///< source device memory Element * ptr_device_imag, ///< source device memory LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_to_device( ptr_device_real, host_data(), count); device_memory::copy_to_device( ptr_device_imag, host_data_imag(), count); } /// Copy data from a caller-supplied device pointer into host memory. void copy_out_host_to_host( Element * ptr_host_real, ///< source host memory Element * ptr_host_imag, ///< source host memory LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten. if (count < 0) { count = capacity(); } else { count = __NV_STD_MIN(capacity(), count); } device_memory::copy_host_to_host( ptr_host_real, host_data(), count); device_memory::copy_host_to_host( ptr_host_imag, host_data_imag(), count); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass
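A minimal usage sketch for HostTensorPlanarComplex, assuming the CUTLASS util headers are on the include path; the extent, coordinates, and values below are arbitrary examples.

```c++
#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor_planar_complex.h"

int main() {
  using Element = float;
  using Layout  = cutlass::layout::ColumnMajor;

  // 128x64 planar-complex tensor backed by both host and device allocations.
  cutlass::HostTensorPlanarComplex<Element, Layout> tensor({128, 64});

  // Write the real and imaginary planes of element (3, 5) through the host pointers.
  auto idx = tensor.offset({3, 5});
  tensor.host_data()[idx]      = 1.0f;   // real part
  tensor.host_data_imag()[idx] = -2.0f;  // imaginary part

  // Mirror host contents to the device before a kernel consumes tensor.device_ref(),
  // then bring results back for host-side checking.
  tensor.sync_device();
  // ... launch kernel ...
  tensor.sync_host();

  return 0;
}
```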
tools/util/include/cutlass/util/host_tensor_planar_complex.h/0
{ "file_path": "tools/util/include/cutlass/util/host_tensor_planar_complex.h", "repo_id": "tools", "token_count": 6640 }
64
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Defines host-side elementwise operations on TensorView. 
*/ #pragma once // Standard Library includes #include <utility> // Cutlass includes #include "cutlass/cutlass.h" #include "cutlass/relatively_equal.h" #include "cutlass/util/distribution.h" #include "tensor_foreach.h" namespace cutlass { namespace reference { namespace device { /////////////////////////////////////////////////////////////////////////////////////////////////// namespace kernel { template <typename Element> __global__ void BlockCompareEqual( int *equal, Element const *ptr_A, Element const *ptr_B, size_t capacity) { size_t idx = threadIdx.x + blockDim.x * blockIdx.x; for (; idx < capacity; idx += gridDim.x * blockDim.x) { Element a = cutlass::ReferenceFactory<Element>::get(ptr_A, idx); Element b = cutlass::ReferenceFactory<Element>::get(ptr_B, idx); if (a != b) { *equal = 0; return; } } } template <typename Element> __global__ void BlockCompareRelativelyEqual( int *equal, Element const *ptr_A, Element const *ptr_B, size_t capacity, Element epsilon, Element nonzero_floor) { size_t idx = threadIdx.x + blockDim.x * blockIdx.x; for (; idx < capacity; idx += gridDim.x * blockDim.x) { Element a = cutlass::ReferenceFactory<Element>::get(ptr_A, idx); Element b = cutlass::ReferenceFactory<Element>::get(ptr_B, idx); if (!relatively_equal(a, b, epsilon, nonzero_floor)) { *equal = 0; return; } } } } // namespace kernel /////////////////////////////////////////////////////////////////////////////////////////////////// /// Performs a bit-level equality check between two blocks template <typename Element> bool BlockCompareEqual( Element const *ptr_A, Element const *ptr_B, size_t capacity, int grid_size = 0, int block_size = 0) { int equal_flag = 1; int *device_equal_flag = nullptr; if (cudaMalloc((void **)&device_equal_flag, sizeof(int)) != cudaSuccess) { throw std::runtime_error("Failed to allocate device flag."); } if (cudaMemcpy( device_equal_flag, &equal_flag, sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) { throw std::runtime_error("Failed to copy equality flag to device."); } if (!grid_size || !block_size) { // if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API cudaError_t result = cudaOccupancyMaxPotentialBlockSize( &grid_size, &block_size, reinterpret_cast<void const *>(kernel::BlockCompareEqual<Element>)); if (result != cudaSuccess) { throw std::runtime_error("Failed to query occupancy."); } // Limit block size. This has the effect of increasing the number of items processed by a // single thread and reduces the impact of initialization overhead. block_size = (block_size < 128 ? 
block_size : 128); } dim3 grid(grid_size, 1, 1); dim3 block(block_size, 1, 1); kernel::BlockCompareEqual<Element><<< grid, block >>>(device_equal_flag, ptr_A, ptr_B, capacity); if (cudaMemcpy( &equal_flag, device_equal_flag, sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) { cudaFree(device_equal_flag); throw std::runtime_error("Failed to copy equality flag from device."); } cudaFree(device_equal_flag); return equal_flag; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Performs an elementwise comparison of two blocks within a relative tolerance template <typename Element> bool BlockCompareRelativelyEqual( Element const *ptr_A, Element const *ptr_B, size_t capacity, Element epsilon, Element nonzero_floor, int grid_size = 0, int block_size = 0) { int equal_flag = 1; int *device_equal_flag = nullptr; if (cudaMalloc((void **)&device_equal_flag, sizeof(int)) != cudaSuccess) { throw std::runtime_error("Failed to allocate device flag."); } if (cudaMemcpy( device_equal_flag, &equal_flag, sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) { throw std::runtime_error("Failed to copy equality flag to device."); } if (!grid_size || !block_size) { // if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API cudaError_t result = cudaOccupancyMaxPotentialBlockSize( &grid_size, &block_size, reinterpret_cast<void const *>(kernel::BlockCompareRelativelyEqual<Element>)); if (result != cudaSuccess) { throw std::runtime_error("Failed to query occupancy."); } // Limit block size. This has the effect of increasing the number of items processed by a // single thread and reduces the impact of initialization overhead. block_size = (block_size < 128 ? block_size : 128); } dim3 grid(grid_size, 1, 1); dim3 block(block_size, 1, 1); kernel::BlockCompareRelativelyEqual<Element><<< grid, block >>>( device_equal_flag, ptr_A, ptr_B, capacity, epsilon, nonzero_floor ); if (cudaMemcpy( &equal_flag, device_equal_flag, sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) { cudaFree(device_equal_flag); throw std::runtime_error("Failed to copy equality flag from device."); } cudaFree(device_equal_flag); return equal_flag; } /////////////////////////////////////////////////////////////////////////////////////////////////// } // device } // reference } // cutlass
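Below is a minimal host-side usage sketch of the comparison helpers defined above. It is illustrative only, not part of the header: the wrapper name `verify_blocks`, the tolerance values, and the assumption that both pointers reference device-resident arrays of `capacity` floats are choices made for this example.

```cpp
#include "cutlass/util/reference/device/tensor_compare.h"

// Returns true if two device-resident float blocks agree within a relative tolerance.
// d_ref and d_test are assumed to each hold `capacity` elements in device memory.
bool verify_blocks(float const *d_ref, float const *d_test, size_t capacity) {
  float epsilon = 1e-5f;        // relative tolerance used by relatively_equal()
  float nonzero_floor = 1e-6f;  // magnitudes below this are treated as zero
  return cutlass::reference::device::BlockCompareRelativelyEqual(
      d_ref, d_test, capacity, epsilon, nonzero_floor);
}
```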
tools/util/include/cutlass/util/reference/device/tensor_compare.h/0
{ "file_path": "tools/util/include/cutlass/util/reference/device/tensor_compare.h", "repo_id": "tools", "token_count": 2337 }
65
![ALT](./media/images/gemm-hierarchy-with-epilogue-no-labels.png "Complete CUDA GEMM decomposition") # CUTLASS 3.5 _CUTLASS 3.5 - April 2024_ CUTLASS is a collection of CUDA C++ template abstractions for implementing high-performance matrix-matrix multiplication (GEMM) and related computations at all levels and scales within CUDA. It incorporates strategies for hierarchical decomposition and data movement similar to those used to implement cuBLAS and cuDNN. CUTLASS decomposes these "moving parts" into reusable, modular software components abstracted by C++ template classes. Primitives for different levels of a conceptual parallelization hierarchy can be specialized and tuned via custom tiling sizes, data types, and other algorithmic policy. The resulting flexibility simplifies their use as building blocks within custom kernels and applications. To support a wide variety of applications, CUTLASS provides extensive support for mixed-precision computations, providing specialized data-movement and multiply-accumulate abstractions for half-precision floating point (FP16), BFloat16 (BF16), Tensor Float 32 (TF32), single-precision floating point (FP32), [FP32 emulation via tensor core instruction](./examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm), double-precision floating point (FP64) types, integer data types (4b and 8b), and binary data types (1b). CUTLASS demonstrates warp-synchronous matrix multiply operations targeting the programmable, high-throughput _Tensor Cores_ implemented by NVIDIA's Volta, Turing, Ampere, and Hopper architectures. See the [Quick Start Guide](./media/docs/quickstart.md) to get started quickly. See the [functionality listing](./media/docs/functionality.md) for the list of operations supported at each level of the execution model hierarchy. CUTLASS 3.0 introduced a new core library, CuTe, to describe and manipulate tensors of threads and data. CuTe is a collection of C++ CUDA template abstractions for defining and operating on hierarchically multidimensional layouts of threads and data. CuTe provides `Layout` and `Tensor` objects that compactly package the type, shape, memory space, and layout of data, while performing the complicated indexing for the user. This lets programmers focus on the logical descriptions of their algorithms while CuTe does the mechanical bookkeeping for them. With these tools, we can quickly design, implement, and modify all dense linear algebra operations. The core abstractions of CuTe are hierarchically multidimensional layouts which can be composed with data arrays to represent tensors. The representation of layouts is powerful enough to represent nearly everything we need to implement efficient dense linear algebra. Layouts can also be combined and manipulated via functional composition, on which we build a large set of common operations such as tiling and partitioning. CUTLASS 3.0 and beyond adopts CuTe throughout the GEMM hierarchy in its templates. This greatly simplifies the design and improves code composability and readability. More documentation specific to CuTe can be found in its [dedicated documentation directory](./media/docs/cute/00_quickstart.md). In addition to GEMMs, CUTLASS implements high-performance convolution via the implicit GEMM algorithm. Implicit GEMM is the formulation of a convolution operation as a GEMM thereby taking advantage of CUTLASS's modular GEMM pipeline. This allows CUTLASS to build convolutions by reusing highly-optimized GEMM components. 
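As a quick, illustrative sketch of the CuTe vocabulary described above (this snippet is not taken from the CUTLASS sources; it assumes the repository's `include/` directory is on the include path and compilation with `nvcc -std=c++17`), the following host-side code builds a column-major `Layout` and wraps a raw buffer in a non-owning `Tensor`:

```cpp
#include <cute/tensor.hpp>
#include <vector>

int main() {
  using namespace cute;

  // A 4x8 column-major layout: shape (4,8) with strides (1,4)
  auto layout = make_layout(make_shape(4, 8), make_stride(1, 4));

  // Wrap a raw buffer in a non-owning Tensor; the layout maps logical
  // coordinates to linear offsets into the buffer.
  std::vector<float> buffer(size(layout));
  auto tensor = make_tensor(buffer.data(), layout);  // a cute::Tensor

  tensor(2, 3) = 1.0f;  // element at logical coordinate (2,3)
  print(layout);        // prints the layout as shape:stride, e.g. (4,8):(1,4)

  return 0;
}
```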
# What's New in CUTLASS 3.5 CUTLASS 3.5 is an update to CUTLASS adding: - Implicit GEMM Convolutions targeting Hopper SM90A via WGMMA + [TMA im2col](./include/cute/atom/copy_traits_sm90_im2col.hpp). + Native implementation in CUTLASS 3.x using CuTe, mirroring the [same design hierarchy as that of GEMMs](./media/docs/gemm_api_3x.md). + Support for 1D, 2D, and 3D convolutions in a [rank-agnostic fashion](./include/cutlass/conv/convnd_problem_shape.hpp). + Support for [Fprop](./test/unit/conv/device_3x/fprop/sm90_conv3d_fprop_implicit_gemm_s8_s8_s32_tensorop_s32.cu), [Dgrad](./test/unit/conv/device_3x/dgrad/sm90_conv2d_dgrad_implicit_gemm_f16_f16_f32_tensorop_f16.cu), and [Wgrad](./test/unit/conv/device_3x/wgrad/sm90_conv1d_wgrad_implicit_gemm_f16_f16_f32_tensorop_f16.cu) algorithms. + [CUTLASS profiler support](./python/cutlass_library/conv3x_emitter.py) for 2D and 3D convolutions implemented via the 3.x API. + NOTE: this is a beta release. Further updates to CUTLASS will include major performance improvements, feature enablement, and possible breaking changes to the API until 3.7 release. Your feedback is welcome on the design! - Support for [Ada (SM89) FP8 tensor cores via the 2.x API](./examples/58_ada_fp8_gemm/ada_fp8_gemm.cu). Requires CUDA 12.4 or newer. - [Ampere gather/scatter convolution example](./examples/59_ampere_gather_scatter_gemm/README.md) in CuTe and CUTLASS 3.x. + Showcasing how custom kernels can be written and optimized using CUTLASS 3.x and CuTe and the general strategy for implementing convolutions as specializations of GETTs. + Implementation of a coarse grained sparse gather/scatter kernel achieving peak performance on Ampere class tensor cores. - 32x and 16x tile sizes are added to CUTLASS 2.x to improve the performance of narrow-tall and wide-short matrices. - Updates to CuTe documentation for [`cute::Tensor<>`](./media/docs/cute/03_tensor.md), [MMA atoms](./media/docs/cute/0t_mma_atom.md), and an overhauled [CuTe GEMM tutorial series](./examples/cute/tutorial). - Extensions to CuTe to support [L2 prefetching](./include/cute/algorithm/prefetch.hpp) and [TMA store+reductions](./include/cute/arch/copy_sm90_tma.hpp#L1337). - Remove C++11 requirement on a few CUTLASS 2.x API header files. All CUTLASS files now require C++17. - Fixes to greatly reduce build warnings. - Updates and bugfixes from the community (thanks!) Minimum requirements: - Architecture: Volta - Compiler: Must support at least C++17 - CUDA Toolkit version: 11.4 Starting from CUTLASS 3.0, CUTLASS removed support for the following: - Maxwell and Pascal GPU architectures - Ubuntu 16.04 - CUDA 10.2 - C++ language versions less than 17. **See the [CHANGELOG](CHANGELOG.md) for a detailed listing of releases and updates.** # Performance <p align="center"><img src=media/images/cutlass-3.1-gemm-peak-performance.png></p> CUTLASS primitives are very efficient. When used to construct device-wide GEMM kernels, they exhibit peak performance comparable to cuBLAS for scalar GEMM computations. The above figure shows CUTLASS performance relative to cuBLAS for large matrix dimensions on an [NVIDIA H100](https://www.nvidia.com/en-us/data-center/h100/) (NVIDIA Hopper architecture), an [NVIDIA L40](https://www.nvidia.com/en-us/data-center/l40/) (NVIDIA Ada architecture), an [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) (NVIDIA Ampere architecture), and an [NVIDIA A40](https://www.nvidia.com/en-us/data-center/a40/) (NVIDIA Ampere architecture). 
CUTLASS 3.0 was compiled with the [CUDA 12.0 Toolkit](https://developer.nvidia.com/cuda-downloads). Tensor Core operations are implemented using CUDA's [mma](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma) and [wgmma](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#asynchronous-warpgroup-level-matrix-instructions) instructions. <p align="center"><img src=media/images/cutlass-2.9-implicit-gemm-performance.png></p> When using CUTLASS building blocks to construct device-wide implicit gemm (Fprop, Dgrad, and Wgrad) kernels, CUTLASS performance is also comparable to cuDNN when running Resnet-50 layers on an [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) as shown in the above figure. Tensor Core operations are implemented using CUDA's [mma instruction](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma). # Compatibility CUTLASS requires a C++17 host compiler and performs best when built with the [**CUDA 12.4 Toolkit**](https://developer.nvidia.com/cuda-downloads). It is also compatible with CUDA 11.4, CUDA 11.5, CUDA 11.6, CUDA 11.7, CUDA 11.8, CUDA 12.0, CUDA 12.1, CUDA 12.2.2, CUDA 12.3.1 and CUDA 12.3.2. ## Operating Systems We have tested the following environments. |**Operating System** | **Compiler** | |-----------------|----------| | Ubuntu 18.04 | GCC 7.5.0 | | Ubuntu 20.04 | GCC 10.3.0 | | Ubuntu 22.04 | GCC 11.2.0 | | Ubuntu 22.04 | Clang 10.0.0 | | Ubuntu 22.04 | Clang 14.0.6 | | Ubuntu 22.04 | Clang 17.0.6 | | Windows 10.0 | Visual Studio 2019 v16.11.27 | Note: GCC 8.5.0 has known regressions regarding fold expressions and overloaded operators. Using GCC 7.5.0 or (preferred) GCC >= 9 is recommended. ## Hardware CUTLASS runs successfully on the following NVIDIA GPUs, and it is expected to be efficient on Volta, Turing, Ampere, Ada, and Hopper architecture based NVIDIA GPUs. |**GPU**|**CUDA Compute Capability**|**Minimum CUDA Toolkit Required by CUTLASS-3**| |---|---|---| |NVIDIA V100 Tensor Core GPU |7.0|11.4| |NVIDIA TitanV |7.0|11.4| |NVIDIA GeForce RTX 2080 TI, 2080, 2070 |7.5|11.4| |NVIDIA T4 |7.5|11.4| |NVIDIA A100 Tensor Core GPU |8.0|11.4| |NVIDIA A10 |8.6|11.4| |NVIDIA GeForce RTX 3090 |8.6|11.4| |NVIDIA GeForce RTX 4090 |8.9|11.8| |NVIDIA L40 |8.9|11.8| |NVIDIA H100 Tensor Core GPU |9.0|11.8| ## Target Architecture In general, PTX code generated for one target architecture can be run on future architectures (i.e., it is forward compatible). However, CUDA 12.0 introduced the concept of "architecture-accelerated features" whose PTX does not have forward compatibility guarantees. Several Hopper PTX instructions fall under this category of architecture-accelerated features, and thus require a `sm_90a` target architecture (note the "a" appended). For more details on this and other architecture-accelerated instructions, please refer to the [CUDA Documentation](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#feature-availability). The target architecture information is passed on to CUTLASS via the cmake flag `CUTLASS_NVCC_ARCHS`. In order to maximize performance on Hopper GH100, users are required to build CUTLASS with `90a` as the target architecture. If a user accidentally builds a kernel which uses SM90a features (e.g. Hopper Tensor Core Instructions), using the SM90 target (note the lack of "a"), with either CTK 12 or 11.8, the kernel is expected to fail with a runtime error. ``` cmake .. 
-DCUTLASS_NVCC_ARCHS="90a" ``` Please refer to the [functionality documentation](./media/docs/functionality.md) for details on which kernels require which target architectures. # Documentation CUTLASS is described in the following documents and the accompanying [Doxygen documentation](https://nvidia.github.io/cutlass). - [Quick Start Guide](./media/docs/quickstart.md) - build and run CUTLASS - [Functionality](./media/docs/functionality.md) - summarizes functionality available in CUTLASS - [Efficient GEMM in CUDA](./media/docs/efficient_gemm.md) - describes how GEMM kernels may be implemented efficiently in CUDA - [CUTLASS 3.x Design](./media/docs/cutlass_3x_design.md) - describes the CUTLASS 3.x design, its benefits, and how CuTe enables us to write much more composable components - [GEMM API 3.x](./media/docs/gemm_api_3x.md) - describes the CUTLASS 3.x GEMM model and C++ template concepts - [GEMM API 2.x](./media/docs/gemm_api.md) - describes the CUTLASS 2.x GEMM model and C++ template concepts - [Implicit GEMM Convolution](./media/docs/implicit_gemm_convolution.md) - describes 2-D and 3-D convolution in CUTLASS - [Code Organization](./media/docs/code_organization.md) - describes the organization and contents of the CUTLASS project - [Terminology](./media/docs/terminology.md) - describes terms used in the code - [Programming Guidelines](./media/docs/programming_guidelines.md) - guidelines for writing efficient modern CUDA C++ - [Fundamental types](./media/docs/fundamental_types.md) - describes basic C++ classes used in CUTLASS to represent numeric quantities and arrays - [Layouts](./media/docs/layout.md) - describes layouts of matrices and tensors in memory - [Tile Iterators](./media/docs/tile_iterator_concept.md) - describes C++ concepts for iterating over tiles of matrices in memory - [CUTLASS Profiler](./media/docs/profiler.md) - command-line driven profiling application - [CUTLASS Utilities](./media/docs/utilities.md) - additional templates used to facilitate rapid development # Resources We have also described the structure of an efficient GEMM in our talk at the [GPU Technology Conference 2018](http://on-demand.gputechconf.com/gtc/2018/presentation/s8854-cutlass-software-primitives-for-dense-linear-algebra-at-all-levels-and-scales-within-cuda.pdf). - [CUTLASS: Software Primitives for Dense Linear Algebra at All Levels and Scales within CUDA](https://www.nvidia.com/en-us/on-demand/session/gtcsiliconvalley2018-s8854/) - [Developing CUDA Kernels to Push Tensor Cores to the Absolute Limit on NVIDIA A100](https://www.nvidia.com/en-us/on-demand/session/gtcsj20-s21745/) - [Accelerating Convolution with Tensor Cores in CUTLASS](https://www.nvidia.com/en-us/on-demand/session/gtcspring21-s31883/) - [Accelerating Backward Data Gradient by Increasing Tensor Core Utilization in CUTLASS](https://www.nvidia.com/en-us/on-demand/session/gtcspring22-s41996/) - [CUTLASS: Python API, Enhancements, and NVIDIA Hopper](https://www.nvidia.com/en-us/on-demand/session/gtcfall22-a41131/) # Building CUTLASS CUTLASS is a header-only template library and does not need to be built to be used by other projects. Client applications should target CUTLASS's `include/` directory in their include paths. CUTLASS unit tests, examples, and utilities can be built with CMake. The minimum version of CMake is given in the [Quickstart guide](./media/docs/quickstart.md). Make sure the `CUDACXX` environment variable points to NVCC in the CUDA Toolkit installed on your system.
```bash $ export CUDACXX=${CUDA_INSTALL_PATH}/bin/nvcc ``` Create a build directory within the CUTLASS project, then run CMake. By default CUTLASS will build kernels for CUDA architecture versions 5.0, 6.0, 6.1, 7.0, 7.5, 8.0, 8.6, 8.9, and 9.0. To reduce compile time you can specify the architectures to build CUTLASS for by changing the CMake configuration setting `CUTLASS_NVCC_ARCHS`. ```bash $ mkdir build && cd build $ cmake .. -DCUTLASS_NVCC_ARCHS=80 # compiles for NVIDIA's Ampere Architecture ``` From the `build/` directory, compile and run the CUTLASS unit tests by building the target `test_unit` with make. The unit tests are organized as several binaries mirroring the top-level namespaces of CUTLASS, and they may be executed in parallel via make's `-j` command line argument. ```bash $ make test_unit -j ... ... ... [----------] Global test environment tear-down [==========] 946 tests from 57 test cases ran. (10812 ms total) [ PASSED ] 946 tests. ``` All tests should pass on supported platforms, though the exact number of tests may vary over time. # Project Structure CUTLASS is arranged as a header-only library along with Utilities, Tools, Examples, and unit tests. [Doxygen documentation](https://nvidia.github.io/cutlass) provides a complete list of files, classes, and template concepts defined in the CUTLASS project. A detailed explanation of the source code organization may be found in the [CUTLASS documentation](./media/docs/code_organization.md), but several main components are summarized below. ## CUTLASS Template Library ``` include/ # client applications should target this directory in their build's include paths cutlass/ # CUDA Templates for Linear Algebra Subroutines and Solvers - headers only arch/ # direct exposure of architecture features (including instruction-level GEMMs) conv/ # code specialized for convolution epilogue/ # code specialized for the epilogue of gemm/convolution gemm/ # code specialized for general matrix product computations layout/ # layout definitions for matrices, tensors, and other mathematical objects in memory platform/ # CUDA-capable Standard Library components reduction/ # bandwidth-limited reduction kernels that do not fit the "gemm" model thread/ # simt code that can be performed within a CUDA thread transform/ # code specialized for layout, type, and domain transformations * # core vocabulary types, containers, and basic numeric operations cute/ # CuTe Layout, layout algebra, MMA/Copy atoms, tiled MMA/Copy algorithm/ # Definitions of core operations such as copy, gemm, and operations on cute::tuples arch/ # Bare bones PTX wrapper structs for copy and math instructions atom/ # Meta-information either link to or built from arch/ operators mma_atom.hpp # cute::Mma_Atom and cute::TiledMma copy_atom.hpp # cute::Copy_Atom and cute::TiledCopy *sm*.hpp # Arch specific meta-information for copy and math operations * # Core library types such as Shape, Stride, Layout, Tensor, and associated operations ``` ### CUTLASS SDK Examples [CUTLASS SDK examples](./examples) apply CUTLASS templates to implement basic computations. 
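For orientation, a single-precision GEMM in the spirit of the `00_basic_gemm` example can be sketched as follows. This is a simplified illustration rather than a verbatim excerpt: the wrapper name `run_sgemm` is hypothetical, and the operands are assumed to be column-major matrices already resident in device memory.

```cpp
#include "cutlass/gemm/device/gemm.h"

// Single-precision GEMM with all operands in column-major layout
using Gemm = cutlass::gemm::device::Gemm<
    float, cutlass::layout::ColumnMajor,   // A
    float, cutlass::layout::ColumnMajor,   // B
    float, cutlass::layout::ColumnMajor>;  // C / D

// Computes C = alpha * A * B + beta * C on device-resident, column-major matrices
cutlass::Status run_sgemm(
    int M, int N, int K,
    float alpha, float const *A, int lda,
    float const *B, int ldb,
    float beta, float *C, int ldc) {

  Gemm gemm_op;

  // Pack the problem size, operand references, and epilogue scalars;
  // C is used both as the source accumulator and the destination D.
  Gemm::Arguments args({M, N, K}, {A, lda}, {B, ldb}, {C, ldc}, {C, ldc}, {alpha, beta});

  return gemm_op(args);
}
```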
### Tools ``` tools/ library/ # CUTLASS Instance Library - contains instantiations of all supported CUTLASS templates include/ cutlass/ library/ profiler/ # CUTLASS Profiler - command-line utility for executing operations in the # CUTLASS Library util/ # CUTLASS Utilities - contains numerous helper classes for include/ # managing tensors in device memory, reference cutlass/ # implementations for GEMM, random initialization util/ # of tensors, and I/O. ``` ### Test The `test/unit/` directory consists of unit tests implemented with Google Test that demonstrate basic usage of Core API components and complete tests of the CUTLASS GEMM computations. Instructions for building and running the Unit tests are described in the [Quickstart guide](./media/docs/quickstart.md). # Performance Profiling The `tools/profiler/` directory contains a command-line utility for launching each of the GEMM kernels. It can be built as follows: ```bash $ make cutlass_profiler -j16 ``` ## Building all GEMM and Convolution kernels (_long_ build times) By default, only one tile size is instantiated for each data type, math instruction, and layout. To instantiate all, set the following environment variable when running CMake from an empty `build/` directory. Beware, this results in *tens of thousands* of kernels and long build times. This would also result in a large binary size and, on some platforms, cause the linker to fail when building the library. Therefore, it's highly recommended to generate only a subset of kernels as demonstrated in the sub-section below. ```bash $ cmake .. -DCUTLASS_NVCC_ARCHS=90a -DCUTLASS_LIBRARY_KERNELS=all ... $ make cutlass_profiler -j16 ``` ## Building a subset of GEMM and Convolution kernels (_reduced_ build times) To compile strictly one kernel or a small set of kernels, a comma-delimited list of kernel names with wildcard characters may be used to reduce the set of kernels. The following examples show building exactly one or a subset of kernels for NVIDIA Ampere and Turing architecture: ### Building a subset of Tensor Core GEMM kernels To compile a subset of Tensor Core GEMM kernels with FP32 accumulation and FP16 input targeting NVIDIA Ampere and Turing architecture, use the below cmake command line: ```bash $ cmake .. -DCUTLASS_NVCC_ARCHS='75;80' -DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_s*gemm_f16_*_nt_align8 ... $ make cutlass_profiler -j16 ``` Example command line for profiling a subset of Tensor Core GEMM kernels is as follows: ```bash ./tools/profiler/cutlass_profiler --kernels=cutlass_tensorop_s*gemm_f16_*_nt_align8 --m=3456 --n=4096 --k=4096 ... ============================= Problem ID: 1 Provider: CUTLASS OperationKind: gemm Operation: cutlass_tensorop_s1688gemm_f16_256x128_32x2_nt_align8 Status: Success Verification: ON Disposition: Passed reference_device: Passed cuBLAS: Passed Arguments: --gemm_kind=universal --m=3456 --n=4096 --k=4096 --A=f16:column --B=f16:row --C=f32:column --alpha=1 \ --beta=0 --split_k_slices=1 --batch_count=1 --op_class=tensorop --accum=f32 --cta_m=256 --cta_n=128 \ --cta_k=32 --stages=2 --warps_m=4 --warps_n=2 --warps_k=1 --inst_m=16 --inst_n=8 --inst_k=8 --min_cc=75 \ --max_cc=1024 Bytes: 118489088 bytes FLOPs: 115992428544 flops Runtime: 1.55948 ms Memory: 70.7616 GiB/s Math: 74378.8 GFLOP/s ============================= ... ``` ### Building one CUDA Core GEMM kernel To compile one SGEMM kernel targeting NVIDIA Ampere and Turing architecture, use the below cmake command line: ```bash $ cmake ..
-DCUTLASS_NVCC_ARCHS='75;80' -DCUTLASS_LIBRARY_KERNELS=cutlass_simt_sgemm_128x128_8x2_nn_align1 ... $ make cutlass_profiler -j16 ``` Example command line for profiling single SGEMM CUDA kernel is as follows: ```bash $ ./tools/profiler/cutlass_profiler --kernels=sgemm --m=3456 --n=4096 --k=4096 ============================= Problem ID: 1 Provider: CUTLASS OperationKind: gemm Operation: cutlass_simt_sgemm_128x128_8x2_nn_align1 Status: Success Verification: ON Disposition: Passed cuBLAS: Passed Arguments: --m=3456 --n=4096 --k=4096 --A=f32:column --B=f32:column --C=f32:column --alpha=1 --beta=0 --split_k_slices=1 \ --batch_count=1 --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \ --warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024 Bytes: 180355072 bytes FLOPs: 115992428544 flops Runtime: 6.73655 ms Memory: 24.934 GiB/s Math: 17218.4 GFLOP/s ============================= ``` ### Building a subset of Tensor Core Convolution kernels To compile a subset of Tensor core convolution kernels implementing forward propagation (fprop) with FP32 accumulation and FP16 input targeting NVIDIA Ampere and Turing architecture, use the below cmake command line: ```bash $ cmake .. -DCUTLASS_NVCC_ARCHS='75;80' -DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_s*fprop_optimized_f16 ... $ make cutlass_profiler -j16 ``` Example command line for profiling a subset of Tensor Core convolution kernels is as follows: ```bash $ ./tools/profiler/cutlass_profiler --kernels=cutlass_tensorop_s*fprop_optimized_f16 --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 ... ============================= Problem ID: 1 Provider: CUTLASS OperationKind: conv2d Operation: cutlass_tensorop_s16816fprop_optimized_f16_128x128_32x5_nhwc Status: Success Verification: ON Disposition: Passed reference_device: Passed Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \ --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f16:nhwc --Filter=f16:nhwc --Output=f32:nhwc \ --conv_mode=cross --iterator_algorithm=optimized --alpha=1 --beta=0 --split_k_mode=serial --split_k_slices=1 \ --eq_gemm_provider=none --op_class=tensorop --accum=f32 --cta_m=128 --cta_n=128 --cta_k=32 --stages=5 \ --warps_m=2 --warps_n=2 --warps_k=1 --inst_m=16 --inst_n=8 --inst_k=16 --min_cc=80 --max_cc=1024 Bytes: 1130659840 bytes FLOPs: 118482796544 flops Runtime: 0.711496 ms Memory: 1479.99 GiB/s Math: 166526 GFLOP/s ============================= ... ``` ### Building one Convolution CUDA kernel To compile and run one CUDA Core convolution kernel implementing forward propagation (fprop) with F32 accumulation and FP32 input targeting NVIDIA Ampere and Turing architecture, use the below cmake command line: ```bash $ cmake .. -DCUTLASS_NVCC_ARCHS='75;80' -DCUTLASS_LIBRARY_KERNELS=cutlass_simt_sfprop_optimized_128x128_8x2_nhwc ... 
$ make cutlass_profiler -j16 ``` Example command line for profiling one CUDA Core convolution kernel: ```bash $ ./tools/profiler/cutlass_profiler --kernels=cutlass_simt_sfprop_optimized_128x128_8x2_nhwc --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 ============================= Problem ID: 1 Provider: CUTLASS OperationKind: conv2d Operation: cutlass_simt_sfprop_optimized_128x128_8x2_nhwc Status: Success Verification: ON Disposition: Passed reference_device: Passed Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \ --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f32:nhwc --Filter=f32:nhwc --Output=f32:nhwc \ --conv_mode=cross --iterator_algorithm=optimized --alpha=1 --beta=0 --split_k_mode=serial --split_k_slices=1 \ --eq_gemm_provider=none --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \ --warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024 Bytes: 2055798784 bytes FLOPs: 118482796544 flops Runtime: 7.34266 ms Memory: 260.752 GiB/s Math: 16136.2 GFLOP/s ============================= ``` ## More Details on Compiling CUTLASS Kernels and CUTLASS Profiler - Please follow the links for more CMake examples on selectively compiling CUTLASS kernels: - [GEMM CMake Examples](./media/docs/quickstart.md#gemm-cmake-examples) - [Implicit GEMM convolution CMake Examples](./media/docs/quickstart.md#convolution-cmake-examples) - [Further details about the CUTLASS Profiler are described here.](./media/docs/profiler.md) # About CUTLASS is released by NVIDIA Corporation as Open Source software under the [3-clause "New" BSD license](LICENSE.txt). # Contributors The official list of CUTLASS developers and contributors is available here: [CONTRIBUTORS](CONTRIBUTORS.md). # Copyright Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: BSD-3-Clause ``` Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
README.md/0
{ "file_path": "README.md", "repo_id": "README.md", "token_count": 10030 }
0
function toggleVisibility(linkObj) { var base = $(linkObj).attr('id'); var summary = $('#'+base+'-summary'); var content = $('#'+base+'-content'); var trigger = $('#'+base+'-trigger'); var src=$(trigger).attr('src'); if (content.is(':visible')===true) { content.hide(); summary.show(); $(linkObj).addClass('closed').removeClass('opened'); $(trigger).attr('src',src.substring(0,src.length-8)+'closed.png'); } else { content.show(); summary.hide(); $(linkObj).removeClass('closed').addClass('opened'); $(trigger).attr('src',src.substring(0,src.length-10)+'open.png'); } return false; } function updateStripes() { $('table.directory tr'). removeClass('even').filter(':visible:even').addClass('even'); } function toggleLevel(level) { $('table.directory tr').each(function() { var l = this.id.split('_').length-1; var i = $('#img'+this.id.substring(3)); var a = $('#arr'+this.id.substring(3)); if (l<level+1) { i.removeClass('iconfopen iconfclosed').addClass('iconfopen'); a.html('&#9660;'); $(this).show(); } else if (l==level+1) { i.removeClass('iconfclosed iconfopen').addClass('iconfclosed'); a.html('&#9658;'); $(this).show(); } else { $(this).hide(); } }); updateStripes(); } function toggleFolder(id) { // the clicked row var currentRow = $('#row_'+id); // all rows after the clicked row var rows = currentRow.nextAll("tr"); var re = new RegExp('^row_'+id+'\\d+_$', "i"); //only one sub // only match elements AFTER this one (can't hide elements before) var childRows = rows.filter(function() { return this.id.match(re); }); // first row is visible we are HIDING if (childRows.filter(':first').is(':visible')===true) { // replace down arrow by right arrow for current row var currentRowSpans = currentRow.find("span"); currentRowSpans.filter(".iconfopen").removeClass("iconfopen").addClass("iconfclosed"); currentRowSpans.filter(".arrow").html('&#9658;'); rows.filter("[id^=row_"+id+"]").hide(); // hide all children } else { // we are SHOWING // replace right arrow by down arrow for current row var currentRowSpans = currentRow.find("span"); currentRowSpans.filter(".iconfclosed").removeClass("iconfclosed").addClass("iconfopen"); currentRowSpans.filter(".arrow").html('&#9660;'); // replace down arrows by right arrows for child rows var childRowsSpans = childRows.find("span"); childRowsSpans.filter(".iconfopen").removeClass("iconfopen").addClass("iconfclosed"); childRowsSpans.filter(".arrow").html('&#9658;'); childRows.show(); //show all children } updateStripes(); } function toggleInherit(id) { var rows = $('tr.inherit.'+id); var img = $('tr.inherit_header.'+id+' img'); var src = $(img).attr('src'); if (rows.filter(':first').is(':visible')===true) { rows.css('display','none'); $(img).attr('src',src.substring(0,src.length-8)+'closed.png'); } else { rows.css('display','table-row'); // using show() causes jump in firefox $(img).attr('src',src.substring(0,src.length-10)+'open.png'); } }
docs/dynsections.js/0
{ "file_path": "docs/dynsections.js", "repo_id": "docs", "token_count": 1176 }
1
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Planar Complex GEMM This example demonstrates the CUTLASS Library's exposure of planar complex GEMM kernels supporting the batched strided mode. These kernels represent complex matrices by storing the real and imaginary parts of the matrix in disjoint regions in memory. These real-valued matrices are stored using existing cuBLAS layouts as either column-major or row-major layouts with a single leading dimension indicating the stride between columns or rows. The CUTLASS Library collects multiple template instantiations in a data structure and offers a BLAS-like dispatch API to invoke the appropriate kernel on the Volta or Turing architectures. CUTLASS decouples matrix layout from complex transformation, so four possible transformations are possible on the A and B operands: n: column-major c: column-major complex conjugate t: row-major h: row-major complex conjugate The CUTLASS Library contains many kernel instances specialized for architecture, data type, tile size, and alignment. This can result in long compile times. To build strictly the planar complex kernels needed for general application, execute the following CMake command in an empty build directory. $ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" \ -DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_*gemm_planar_complex This builds all planar complex GEMM variants for Volta and Turing architectures. To build strictly the kernels needed for this example, an even narrower filter string may be specified as follows. This only builds planar complex GEMMs targeting Tensor Cores for the 'CN' layout configuration (conjugate A operand with both A and B as column-major). $ cmake .. 
-DCUTLASS_NVCC_ARCHS="70;75;80" \ -DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_f16_s*gemm_planar_complex_f16*cn $ make 10_planar_complex $ ./examples/10_planar_complex/10_planar_complex --m=2048 --n=1024 --k=512 --batch=10 */ #include <iostream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/util/command_line.h" #include "cutlass/util/distribution.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/host_tensor_planar_complex.h" #include "cutlass/util/reference/device/tensor_fill.h" #include "cutlass/util/reference/device/gemm_planar_complex.h" #include "cutlass/util/reference/device/tensor_compare.h" #include "cutlass/library/handle.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double runtime_ms; double gflops; cutlass::Status status; cudaError_t error; bool passed; // // Methods // Result( double runtime_ms = 0, double gflops = 0, cutlass::Status status = cutlass::Status::kSuccess, cudaError_t error = cudaSuccess ): runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::gemm::GemmCoord problem_size; int batch_count; cutlass::complex<float> alpha; cutlass::complex<float> beta; bool reference_check; int iterations; Options(): help(false), problem_size({1024, 1024, 1024}), batch_count(1), reference_check(true), iterations(20), alpha(1), beta() { } bool valid() { return true; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("batch", batch_count); cmd.get_cmd_line_argument("alpha", alpha.real()); cmd.get_cmd_line_argument("alpha_i", alpha.imag()); cmd.get_cmd_line_argument("beta", beta.real()); cmd.get_cmd_line_argument("beta_i", beta.imag()); cmd.get_cmd_line_argument("iterations", iterations); } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "10_planar_complex example\n\n" << " This example uses the CUTLASS Library to execute Planar Complex GEMM computations.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> GEMM M dimension\n" << " --n=<int> GEMM N dimension\n" << " --k=<int> GEMM K dimension\n" << " --batch=<int> Number of GEMM operations executed in one batch\n" << " --alpha=<f32> Epilogue scalar alpha (real part)\n" << " --alpha_i=<f32> Epilogue scalar alpha (imaginary part)\n" << " --beta=<f32> Epilogue scalar beta (real part)\n\n" << " --beta_i=<f32> Epilogue scalar beta (imaginary part)\n\n" << " --iterations=<int> Number of profiling iterations to perform.\n\n"; out << "\n\nExamples:\n\n" << "$ ./examples/10_planar_complex/10_planar_complex --batch=7 --m=1024 --n=512 --k=1024 \\\n" << " --alpha=2 --alpha_i=-2 --beta=0.707 --beta_i=-.707\n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of real-valued multiply-adds int64_t fmas = problem_size.product() * batch_count * 4; // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// /// Performance test environment for planar complex class TestbedPlanarComplex { public: using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::ColumnMajor; using ElementC = cutlass::half_t; using LayoutC = cutlass::layout::ColumnMajor; using ElementCompute = float; using ElementAccumulator = float; // // Data members // cutlass::library::Handle handle; cutlass::gemm::GemmCoord problem_size; int batch_count; cutlass::DeviceAllocation<ElementA> tensor_A; cutlass::DeviceAllocation<ElementB> tensor_B; cutlass::DeviceAllocation<ElementC> tensor_C; cutlass::DeviceAllocation<ElementC> tensor_D; cutlass::DeviceAllocation<ElementC> tensor_D_ref; // // Methods // TestbedPlanarComplex( Options const &options ): problem_size(options.problem_size), batch_count(options.batch_count) { // Allocate device memory for batched strided GEMM tensor_A.reset(int64_t(problem_size.m()) * problem_size.k() * batch_count * 2); tensor_B.reset(int64_t(problem_size.k()) * problem_size.n() * batch_count * 2); tensor_C.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2); tensor_D.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2); tensor_D_ref.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2); } void initialize() { uint64_t seed = 1073; // Use small integers to simplify correctness checking int scope_max = 6; int scope_min = -6; cutlass::reference::device::BlockFillRandomUniform( tensor_A.get(), tensor_A.size(), seed, ElementA(scope_max), ElementA(scope_min), 0); cutlass::reference::device::BlockFillRandomUniform( tensor_B.get(), tensor_B.size(), seed * 2019, ElementB(scope_max), ElementB(scope_min), 0); cutlass::reference::device::BlockFillRandomUniform( tensor_C.get(), tensor_C.size(), seed * 2020, ElementC(scope_max), ElementC(scope_min), 0); } Result profile(Options const &options) { Result result; initialize(); ElementA *ptr_A = tensor_A.get(); ElementB *ptr_B = tensor_B.get(); ElementC *ptr_C = tensor_C.get(); ElementC *ptr_D = tensor_D.get(); int64_t batch_stride_A = int64_t(problem_size.m()) * problem_size.k() * 2; int64_t batch_stride_B = int64_t(problem_size.k()) * 
problem_size.n() * 2; int64_t batch_stride_C = int64_t(problem_size.m()) * problem_size.n() * 2; int64_t batch_stride_D = int64_t(problem_size.m()) * problem_size.n() * 2; typename LayoutA::Stride::Index lda = LayoutA::packed({problem_size.m(), problem_size.k()}).stride(0); typename LayoutB::Stride::Index ldb = LayoutB::packed({problem_size.k(), problem_size.n()}).stride(0); typename LayoutC::Stride::Index ldc = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0); typename LayoutC::Stride::Index ldd = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0); int64_t imag_stride_A = int64_t(problem_size.m()) * problem_size.k(); int64_t imag_stride_B = int64_t(problem_size.k()) * problem_size.n(); int64_t imag_stride_C = int64_t(problem_size.m()) * problem_size.n(); int64_t imag_stride_D = int64_t(problem_size.m()) * problem_size.n(); // // Construct events // cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return -1; } } // Record an event at the start of a series of GEMMs result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // // Run profiling loop // for (int iter = 0; iter < options.iterations; ++iter) { // // Execute the planar complex GEMM kernel via the CUTLASS Library's // dispatch routines. // // Note, for planar complex GEMM kernels, all numeric type arguments // specify the data type of the base real types. These are understood to // apply to planar complex representations of matrices in memory and to complex<T> // structures for scalars. // // See tools/library/include/cutlass/library/handle.h for more details. 
// result.status = handle.gemm_planar_complex( problem_size.m(), // GEMM M dimension problem_size.n(), // GEMM N dimension problem_size.k(), // GEMM K dimension cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued accumulation cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued alpha/beta scalars &options.alpha, // Pointer to alpha scalar, of type complex<T> cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued A matrix cutlass::library::LayoutTypeID::kColumnMajor, // Layout of A matrix cutlass::library::ComplexTransform::kConjugate, // Complex transformation on A matrix operand ptr_A, // Pointer to real part of A matrix ptr_A + imag_stride_A, // Pointer to imaginary part of A matrix lda, // Leading dimension of real part of A matrix lda, // Leading dimension of imaginary part of A matrix cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued B matrix cutlass::library::LayoutTypeID::kColumnMajor, // Layout of B matrix cutlass::library::ComplexTransform::kNone, // Complex transformation on B matrix operand ptr_B, // Pointer to real part of B matrix ptr_B + imag_stride_B, // Pointer to imaginary part of B matrix ldb, // Leading dimension of real part of B matrix ldb, // Leading dimension of imaginary part of B matrix &options.beta, // Pointer to beta scalar, of type complex<T> cutlass::library::NumericTypeID::kF16, // Base data type of complex valued C and D matrices ptr_C, // Pointer to real part of C matrix ptr_C + imag_stride_C, // Pointer to imaginary part of C matrix ldc, // Leading dimension of real part of C matrix ldc, // Leading dimension of imaginary part of C matrix ptr_D, // Pointer to real part of D matrix ptr_D + imag_stride_D, // Pointer to imaginary part of D matrix ldd, // Leading dimension of real part of D matrix ldd, // Leading dimension of imaginary part of D matrix batch_count, // Number of batched elements batch_stride_A, // Stride between batches of real parts of A matrix batch_stride_A, // Stride between batches of imaginary parts of A matrix batch_stride_B, // Stride between batches of real parts of B matrix batch_stride_B, // Stride between batches of imaginary parts of B matrix batch_stride_C, // Stride between batches of real parts of C matrix batch_stride_C, // Stride between batches of imaginary parts of C matrix batch_stride_D, // Stride between batches of real parts of D matrix batch_stride_D // Stride between batches of imaginary parts of D matrix ); if (result.status != cutlass::Status::kSuccess) { std::cerr << "CUTLASS internal error - configuration not supported" << std::endl; return result; } } // // Stop profiling loop // // Record an event when the GEMMs are complete result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Compute average runtime and GFLOPs. 
result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } if (handle.get_last_operation()) { std::cout << "Recently executed '" << handle.get_last_operation()->description().name << "'" << std::endl; } // // Compute reference in device code // if (options.reference_check) { result.passed = true; for (int64_t idx = 0; result.passed && idx < int64_t(batch_count); ++idx) { cutlass::reference::device::GemmPlanarComplex< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator >( problem_size, options.alpha, {tensor_A.get() + idx * batch_stride_A, lda, imag_stride_A}, cutlass::ComplexTransform::kConjugate, {tensor_B.get() + idx * batch_stride_B, ldb, imag_stride_B}, cutlass::ComplexTransform::kNone, options.beta, {tensor_C.get() + idx * batch_stride_C, ldc, imag_stride_C}, {tensor_D_ref.get() + idx * batch_stride_D, ldd, imag_stride_D} ); ElementC epsilon = 0.1_hf; ElementC nonzero_floor = 0.1_hf; result.passed = cutlass::reference::device::BlockCompareRelativelyEqual( tensor_D.get() + idx * batch_stride_D, tensor_D_ref.get() + idx * batch_stride_D, batch_stride_D, epsilon, nonzero_floor ); } if (result.passed) { std::cout << "Reference check passed." << std::endl; } else { std::cerr << "Error - reference check failed." << std::endl; } } std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout << " GFLOPs: " << result.gflops << std::endl; return result; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { // // This example uses mma.sync to directly access Tensor Cores to achieve peak performance. // // Volta Tensor Core operations are first available in CUDA 10.1 Toolkit. // // Turing Tensor Core operations are first available in CUDA 10.2 Toolkit. // cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (props.major < 7) { std::cerr << "Volta Tensor Core operations must be run on a machine with compute capability at least 70." << std::endl; // Returning zero so this test passes on older architectures even though its actions are no-op. return 0; } else if (props.major == 7 && props.minor <= 2) { // // If running on the Volta architecture, at least CUDA 10.1 Toolkit is required to run this example. // if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) { std::cerr << "Volta Tensor Core operations must be compiled with CUDA 10.1 Toolkit or later." << std::endl; // Returning zero so this test passes on older Toolkits even though its actions are no-op. return 0; } } else if (props.major == 7 && props.minor >= 5) { // // If running on the Turing architecture, at least CUDA 10.2 Toolkit is required to run this example. // if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl; // Returning zero so this test passes on older Toolkits even though its actions are no-op. return 0; } } else { // NVIDIA Ampere Architecture GPUs (SM80 and later) are fully supported on CUDA 11 Toolkit and beyond. 
// // fall through } // // Parse options // Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } TestbedPlanarComplex testbed(options); Result result = testbed.profile(options); return result.passed ? 0 : -1; } /////////////////////////////////////////////////////////////////////////////////////////////////
examples/10_planar_complex/planar_complex.cu/0
{ "file_path": "examples/10_planar_complex/planar_complex.cu", "repo_id": "examples", "token_count": 8729 }
2
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a double-buffered threadblock-scoped GEMM kernel. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape0_, /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape1_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy0_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy1_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class B2bMmaBase { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape0 = Shape0_; using Shape1 = Shape1_; ///< Policy describing tuning details using Policy0 = Policy0_; using Policy1 = Policy1_; // // Dependent types // /// Warp-level Mma using Operator0 = typename Policy0::Operator; using Operator1 = typename Policy1::Operator; /// Shape describing the overall GEMM computed from shared memory /// by each warp. 
using WarpGemm0 = typename Policy0::Operator::Shape; using WarpGemm1 = typename Policy1::Operator::Shape; /// Shape describing the number of warps filling the CTA using WarpCount0 = GemmShape<Shape0::kM / WarpGemm0::kM, Shape0::kN / WarpGemm0::kN, Shape0::kK / WarpGemm0::kK>; using WarpCount1 = GemmShape<Shape1::kM / WarpGemm1::kM, Shape1::kN / WarpGemm1::kN, Shape1::kK / WarpGemm1::kK>; /// Number of warp-level GEMM operations static int const kWarpGemmIterations0 = (WarpGemm0::kK / Operator0::Policy::MmaShape::kK); static int const kWarpGemmIterations1 = (WarpGemm1::kK / Operator1::Policy::MmaShape::kK); /// Number of stages static int const kStages = Stages; // // Nested structs // /// Shared storage object needed by threadblock-scoped GEMM template< typename Shape_, typename Policy_ > class SharedStorage { public: // // Type definitions // using Shape = Shape_; using Policy = Policy_; using Operator = typename Policy::Operator; /// Tensor reference to the A operand using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>; /// Tensor reference to the B operand using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>; /// Shape of the A matrix operand in shared memory using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow, Shape::kK * kStages + Policy::SmemPaddingA::kColumn>; /// Shape of the B matrix operand in shared memory using ShapeB = MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow, Shape::kN + Policy::SmemPaddingB::kColumn>; public: // // Data members // /// Buffer for A operand AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A; /// Buffer for B operand AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B; public: // // Methods // /// Returns a layout object for the A matrix CUTLASS_DEVICE static typename Operator::LayoutA LayoutA() { return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn}); } /// Returns a layout object for the B matrix CUTLASS_HOST_DEVICE static typename Operator::LayoutB LayoutB() { return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn}); } /// Returns a TensorRef to the A operand CUTLASS_HOST_DEVICE TensorRefA operand_A_ref() { return TensorRefA{operand_A.data(), LayoutA()}; } /// Returns a TensorRef to the B operand CUTLASS_HOST_DEVICE TensorRefB operand_B_ref() { return TensorRefB{operand_B.data(), LayoutB()}; } }; using SharedStorage0 = SharedStorage<Shape0, Policy0>; using SharedStorage1 = SharedStorage<Shape1, Policy1>; union B2bMmaSharedStorage { SharedStorage0 shared_storage0; SharedStorage1 shared_storage1; }; protected: // // Data members // /// Iterator to load a warp-scoped tile of A0 operand from shared memory typename Operator0::IteratorA warp_tile_iterator_A0_; /// Iterator to load a warp-scoped tile of B0 operand from shared memory typename Operator0::IteratorB warp_tile_iterator_B0_; /// Iterator to load a warp-scoped tile of B1 operand from shared memory typename Operator1::IteratorB warp_tile_iterator_B1_; public: /// Construct from tensor references CUTLASS_DEVICE B2bMmaBase( ///< Shared storage needed for internal use by threadblock-scoped GEMM B2bMmaSharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx ): warp_tile_iterator_A0_(shared_storage.shared_storage0.operand_A_ref(), lane_idx), warp_tile_iterator_B0_(shared_storage.shared_storage0.operand_B_ref(), lane_idx),
warp_tile_iterator_B1_(shared_storage.shared_storage1.operand_B_ref(), lane_idx) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
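/////////////////////////////////////////////////////////////////////////////////////////////////

//
// Editor's note: the sketch below is added for exposition only and is not part of the original
// CUTLASS header. It illustrates the size relationship that the B2bMmaSharedStorage union above
// relies on: because the storage for the two back-to-back GEMM stages overlaps in a union, the
// threadblock reserves max(sizeof(SharedStorage0), sizeof(SharedStorage1)) bytes of shared memory
// rather than their sum. The struct names below are hypothetical stand-ins, not CUTLASS types.
//
#include <algorithm>

namespace b2b_mma_base_sketch {

struct Stage0Storage { float buffer[64 * 32]; };   // stand-in for SharedStorage0
struct Stage1Storage { float buffer[32 * 128]; };  // stand-in for SharedStorage1

union CombinedStorage {
  Stage0Storage stage0;
  Stage1Storage stage1;
};

static_assert(sizeof(CombinedStorage) ==
              std::max(sizeof(Stage0Storage), sizeof(Stage1Storage)),
              "a union occupies its largest member, not the sum of both stages");

} // namespace b2b_mma_base_sketch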
examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base.h/0
{ "file_path": "examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base.h", "repo_id": "examples", "token_count": 2697 }
3
/***************************************************************************************************
 * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

/*
  This example demonstrates how to call a CUTLASS TRMM kernel and provides a naive reference
  matrix multiply kernel to verify its correctness.

  The CUTLASS Trmm template is instantiated in the function CutlassStrmmNN. This kernel computes
  the triangular matrix product (TRMM) using double-precision floating-point arithmetic and
  assumes all matrices have column-major layout.

  The threadblock tile size is chosen as 64x64x16, which offers good performance for large
  matrices. See the CUTLASS Parallel for All blog post for more exposition on the tunable
  parameters available in CUTLASS.

  https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/

  Aside from defining and launching the STRMM kernel, this example does not use any other
  components or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other
  examples and are prevalent in the CUTLASS unit tests.
*/

// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>

// Helper methods to check for errors
#include "helper.h"

//
// CUTLASS includes needed for double-precision TRMM kernel
//

// Defines cutlass::gemm::device::Trmm, the generic Trmm computation template class.
#include "cutlass/gemm/device/trmm.h"

///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS TRMM kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////

/// Define a CUTLASS TRMM template and launch a TRMM kernel.
cudaError_t CutlassStrmmNN( int M, int N, double alpha, double const *A, int lda, double const *B, int ldb, double *C, int ldc) { // Define type definition for double-precision CUTLASS TRMM with column-major // input matrices and 64x64x16 threadblock tile size (chosen by default). // // To keep the interface manageable, several helpers are defined for plausible compositions // including the following example for double-precision TRMM. Typical values are used as // default template arguments. // // To view the full trmm device API interface, see `cutlass/gemm/device/trmm.h` using ColumnMajor = cutlass::layout::ColumnMajor; using CutlassTrmm = cutlass::gemm::device::Trmm< double, ColumnMajor, cutlass::SideMode::kLeft, cutlass::FillMode::kLower, cutlass::DiagType::kNonUnit, double, ColumnMajor, double, ColumnMajor, double, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<64, 64, 16>, cutlass::gemm::GemmShape<32, 32, 16>, cutlass::gemm::GemmShape<8, 8, 4>, cutlass::epilogue::thread::LinearCombination< double, 1, double, double, cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 5, 1, 1, false, cutlass::arch::OpMultiplyAdd >; // Define a CUTLASS TRMM type CutlassTrmm trmm_operator; // Construct the CUTLASS TRMM arguments object. // // One of CUTLASS's design patterns is to define trmm argument objects that are constructible // in host code and passed to kernels by value. These may include pointers, strides, scalars, // and other arguments needed by Trmm and its components. // // The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible // arguments to kernels and (2.) minimized initialization overhead on kernel entry. // CutlassTrmm::Arguments args(cutlass::gemm::GemmUniversalMode::kGemm, {M, N, M}, // Trmm Problem dimensions in Left-Side Mode 1, // batch_count, {alpha}, // Scalars used in the Epilogue reinterpret_cast<void const *>(A), reinterpret_cast<void const *>(B), reinterpret_cast<void *>(C), // destination matrix D (may be different memory than source C matrix) (int64_t)M*M, // Batch strides (int64_t)M*N, (int64_t)M*N, lda, ldb, ldc); // // Launch the CUTLASS TRMM kernel. // cutlass::Status status = trmm_operator(args); // // Return a cudaError_t if the CUTLASS TRMM operator returned an error code. // if (status != cutlass::Status::kSuccess) { return cudaErrorUnknown; } // Return success, if no errors were encountered. return cudaSuccess; } /////////////////////////////////////////////////////////////////////////////////////////////////// // // The source code after this point in the file is generic CUDA using the CUDA Runtime API // and simple CUDA kernels to initialize matrices and compute the general matrix product. // /////////////////////////////////////////////////////////////////////////////////////////////////// /// Kernel to initialize a matrix with small integers. __global__ void InitializeMatrix_kernel( double *matrix, int ldm, int rows, int columns, int seed = 0, cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < rows && j < columns) { if (fill_mode == cutlass::FillMode::kLower && i < j) return; else if (fill_mode == cutlass::FillMode::kUpper && i > j) return; int offset = i + j * ldm; // Generate arbitrary elements. 
int const k = 16807; int const m = 16; double value = double(((offset + seed) * k % m) - m / 2); matrix[offset] = value; } } /// Simple function to initialize a matrix to arbitrary small integers. cudaError_t InitializeMatrix(double *matrix, int ldm, int rows, int columns, int seed = 0, cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) { dim3 block(16, 16); dim3 grid( (rows + block.x - 1) / block.x, (columns + block.y - 1) / block.y ); InitializeMatrix_kernel<<< grid, block >>>(matrix, ldm, rows, columns, seed, fill_mode); return cudaGetLastError(); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Allocates device memory for a matrix then fills with arbitrary small integers. cudaError_t AllocateMatrix(double **matrix, int ldm, int rows, int columns, int seed = 0, cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) { cudaError_t result; size_t sizeof_matrix = sizeof(double) * ldm * columns; // Allocate device memory. result = cudaMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix); if (result != cudaSuccess) { std::cerr << "Failed to allocate matrix: " << cudaGetErrorString(result) << std::endl; return result; } // Clear the allocation. result = cudaMemset(*matrix, 0, sizeof_matrix); if (result != cudaSuccess) { std::cerr << "Failed to clear matrix device memory: " << cudaGetErrorString(result) << std::endl; return result; } // Initialize matrix elements to arbitrary small integers. result = InitializeMatrix(*matrix, ldm, rows, columns, seed, fill_mode); if (result != cudaSuccess) { std::cerr << "Failed to initialize matrix: " << cudaGetErrorString(result) << std::endl; return result; } return result; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Naive reference TRMM computation. __global__ void ReferenceTrmm_kernel( int M, int N, double alpha, double const *A, int lda, double const *B, int ldb, double *C, int ldc) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < M && j < N) { double accumulator = 0; for (int k = 0; k < M; ++k) { accumulator += A[i + k * lda] * B[k + j * ldb]; // Since A is in Left-Side Mode } C[i + j * ldc] = alpha * accumulator; } } /// Reference TRMM computation. cudaError_t ReferenceTrmm( int M, int N, double alpha, double const *A, int lda, double const *B, int ldb, double *C, int ldc) { dim3 block(16, 16); dim3 grid( (M + block.x - 1) / block.x, (N + block.y - 1) / block.y ); ReferenceTrmm_kernel<<< grid, block >>>(M, N, alpha, A, lda, B, ldb, C, ldc); return cudaGetLastError(); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Allocate several matrices in GPU device memory and call a double-precision /// CUTLASS TRMM kernel. cudaError_t TestCutlassTrmm(int M, int N, double alpha) { cudaError_t result; // // Define several matrices to be used as operands to TRMM kernels. // // Compute leading dimensions for each matrix. int lda = M; int ldb = M; int ldc = M; // Compute size in bytes of the C matrix. size_t sizeof_C = sizeof(double) * ldc * N; // Define pointers to matrices in GPU device memory. double *A; double *B; double *C_cutlass; double *C_reference; // // Allocate matrices in GPU device memory with arbitrary seeds. 
  //

  result = AllocateMatrix(&A, lda, M, M, 0, cutlass::FillMode::kLower);

  if (result != cudaSuccess) {
    return result;
  }

  result = AllocateMatrix(&B, ldb, M, N, 17);

  if (result != cudaSuccess) {
    cudaFree(A);
    return result;
  }

  result = AllocateMatrix(&C_cutlass, ldc, M, N, 101);

  if (result != cudaSuccess) {
    cudaFree(A);
    cudaFree(B);
    return result;
  }

  result = AllocateMatrix(&C_reference, ldc, M, N, 101);

  if (result != cudaSuccess) {
    cudaFree(A);
    cudaFree(B);
    cudaFree(C_cutlass);
    return result;
  }

  result = cudaMemcpy(C_reference, C_cutlass, sizeof_C, cudaMemcpyDeviceToDevice);

  if (result != cudaSuccess) {
    std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
      << cudaGetErrorString(result) << std::endl;

    cudaFree(C_reference);
    cudaFree(C_cutlass);
    cudaFree(B);
    cudaFree(A);

    return result;
  }

  //
  // Launch CUTLASS TRMM.
  //

  result = CutlassStrmmNN(M, N, alpha, A, lda, B, ldb, C_cutlass, ldc);

  if (result != cudaSuccess) {
    std::cerr << "CUTLASS TRMM kernel failed: "
      << cudaGetErrorString(result) << std::endl;

    cudaFree(C_reference);
    cudaFree(C_cutlass);
    cudaFree(B);
    cudaFree(A);

    return result;
  }

  //
  // Verify.
  //

  // Launch reference TRMM
  result = ReferenceTrmm(M, N, alpha, A, lda, B, ldb, C_reference, ldc);

  if (result != cudaSuccess) {
    std::cerr << "Reference TRMM kernel failed: "
      << cudaGetErrorString(result) << std::endl;

    cudaFree(C_reference);
    cudaFree(C_cutlass);
    cudaFree(B);
    cudaFree(A);

    return result;
  }

  // Copy to host and verify equivalence.
  std::vector<double> host_cutlass(ldc * N, 0);
  std::vector<double> host_reference(ldc * N, 0);

  result = cudaMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, cudaMemcpyDeviceToHost);

  if (result != cudaSuccess) {
    std::cerr << "Failed to copy CUTLASS TRMM results: "
      << cudaGetErrorString(result) << std::endl;

    cudaFree(C_reference);
    cudaFree(C_cutlass);
    cudaFree(B);
    cudaFree(A);

    return result;
  }

  result = cudaMemcpy(host_reference.data(), C_reference, sizeof_C, cudaMemcpyDeviceToHost);

  if (result != cudaSuccess) {
    std::cerr << "Failed to copy Reference TRMM results: "
      << cudaGetErrorString(result) << std::endl;

    cudaFree(C_reference);
    cudaFree(C_cutlass);
    cudaFree(B);
    cudaFree(A);

    return result;
  }

  //
  // Free device memory allocations.
  //

  cudaFree(C_reference);
  cudaFree(C_cutlass);
  cudaFree(B);
  cudaFree(A);

  //
  // Test for bit equivalence of results.
  //

  if (host_cutlass != host_reference) {
    std::cerr << "CUTLASS results incorrect." << std::endl;

    return cudaErrorUnknown;
  }

  return cudaSuccess;
}

///////////////////////////////////////////////////////////////////////////////////////////////////

/// Entry point to basic_trmm example.
//
// usage:
//
//   32_basic_trmm <M> <N> <alpha>
//
int main(int argc, const char *arg[]) {

  bool notSupported = false;

  // CUTLASS must be compiled with CUDA 11 Toolkit to run these examples.
  if (!(__CUDACC_VER_MAJOR__ >= 11)) {
    std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
    notSupported = true;
  }

  cudaDeviceProp props;

  cudaError_t error = cudaGetDeviceProperties(&props, 0);
  if (error != cudaSuccess) {
    std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
    return -1;
  }

  if (!((props.major * 10 + props.minor) >= 80)) {
    std::cerr << "This example requires compute capability at least 80." << std::endl;
    notSupported = true;
  }

  if (notSupported) {
    return 0;
  }

  //
  // Parse the command line to obtain TRMM dimensions and scalar values.
  //

  // TRMM problem dimensions.
int problem[2] = { 128, 128 }; for (int i = 1; i < argc && i < 3; ++i) { std::stringstream ss(arg[i]); ss >> problem[i - 1]; } // Scalars used for linear scaling the result of the matrix product. double scalars[1] = { 1 }; for (int i = 3; i < argc && i < 4; ++i) { std::stringstream ss(arg[i]); ss >> scalars[i - 3]; } // // Run the CUTLASS TRMM test. // cudaError_t result = TestCutlassTrmm( problem[0], // TRMM M dimension problem[1], // TRMM N dimension scalars[0] // alpha ); if (result == cudaSuccess) { std::cout << "Passed." << std::endl; } // Exit. return result == cudaSuccess ? 0 : -1; } ///////////////////////////////////////////////////////////////////////////////////////////////////
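///////////////////////////////////////////////////////////////////////////////////////////////////

//
// Editor's note: the function below is an illustrative sketch added for exposition; it is not part
// of the original example and is not called by main(). It mirrors ReferenceTrmm_kernel on the host
// for the left-side, lower-triangular, column-major case used above. Because A is populated only on
// and below its diagonal (the upper triangle remains zero after cudaMemset), restricting the inner
// loop to k <= i gives the same result as the kernel's full loop over k. All names are hypothetical.
//
#include <vector>  // already included above; repeated so the sketch stands alone

void HostReferenceTrmmSketch(
  int M,
  int N,
  double alpha,
  std::vector<double> const &A, int lda,   // M-by-M lower-triangular matrix, column-major
  std::vector<double> const &B, int ldb,   // M-by-N matrix, column-major
  std::vector<double> &C, int ldc) {       // M-by-N output, column-major

  for (int j = 0; j < N; ++j) {
    for (int i = 0; i < M; ++i) {
      double accumulator = 0;

      // A(i, k) is zero above the diagonal, so only k <= i contributes.
      for (int k = 0; k <= i; ++k) {
        accumulator += A[i + k * lda] * B[k + j * ldb];
      }

      C[i + j * ldc] = alpha * accumulator;
    }
  }
}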
examples/32_basic_trmm/basic_trmm.cu/0
{ "file_path": "examples/32_basic_trmm/basic_trmm.cu", "repo_id": "examples", "token_count": 5539 }
4
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief SYR2K Grouped Example. This workload computes a batch of SYR2K operations with distinct problem sizes. This example closely follows 24_gemm_grouped. 
  Examples:

    # Runs a grouped SYR2K with 100 random problem sizes
    $ ./examples/38_syr2k_grouped/38_syr2k_grouped --groups=100

    # Runs a grouped SYR2K with 100 random problem sizes (with SYR2K-K dimension equal to 1024)
    $ ./examples/38_syr2k_grouped/38_syr2k_grouped --groups=100 --k=1024 --verbose=true

    # Runs a grouped SYR2K that is equivalent to a batched SYR2K
    $ ./examples/38_syr2k_grouped/38_syr2k_grouped --groups=100 --n=1024 --k=1024 --verbose=true

    # Execute grouped SYR2K and profile with NSight
    $ nv-nsight-cu-cli ./examples/38_syr2k_grouped/38_syr2k_grouped --n=256 --k=256 --verbose=true \
        --iterations=1 --reference-check=false
*/

/////////////////////////////////////////////////////////////////////////////////////////////////

#include <chrono>
#include <iostream>
#include <fstream>
#include <sstream>
#include <unordered_map>
#include <vector>

#include "cutlass/blas3.h"
#include "cutlass/cutlass.h"
#include "cutlass/device_kernel.h"

#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/kernel/rank_2k_grouped.h"
#include "cutlass/gemm/kernel/default_rank_2k_grouped.h"
#include "cutlass/gemm/device/rank_2k_grouped.h"
#include "cutlass/gemm/device/rank_2k.h"

#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/rank_2k_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Result structure
struct Result {

  double runtime_ms;
  double initialization_time_ms;
  double gflops;
  cutlass::Status status;
  cudaError_t error;
  bool passed;

  //
  // Methods
  //

  Result(
    double runtime_ms = 0,
    double initialization_time_ms = 0,
    double gflops = 0,
    cutlass::Status status = cutlass::Status::kSuccess,
    cudaError_t error = cudaSuccess
  ):
    runtime_ms(runtime_ms), initialization_time_ms(initialization_time_ms), gflops(gflops),
    status(status), error(error), passed(true) { }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

// Command line options parsing
struct Options {

  bool help;
  bool error;
  bool reference_check;
  bool profile_initialization;
  bool sort_problems;

  std::vector<cutlass::gemm::GemmCoord> problem_sizes;

  int alignment;
  int problem_count;
  int iterations;
  int cuda_streams;
  bool verbose;
  float alpha;
  float beta;
  std::string benchmark_path;

  std::string output_tag;
  std::ofstream output_file;

  using GroupScheduleMode = cutlass::gemm::kernel::GroupScheduleMode;
  std::vector<GroupScheduleMode> scheduler_modes;

  std::unordered_map<std::string, GroupScheduleMode>
    str_to_scheduler_mode = {
      {"kDeviceOnly", GroupScheduleMode::kDeviceOnly},
      {"kHostPrecompute", GroupScheduleMode::kHostPrecompute}
    };

  struct GroupScheduleModeHash {
    size_t operator()(GroupScheduleMode m) const {
      return static_cast<size_t>(m);
    }
  };

  std::unordered_map<GroupScheduleMode, std::string, GroupScheduleModeHash>
    scheduler_mode_to_str = {
      {GroupScheduleMode::kDeviceOnly, "kDeviceOnly"},
      {GroupScheduleMode::kHostPrecompute, "kHostPrecompute"}
    };

  std::vector<GroupScheduleMode> all_scheduler_modes = {GroupScheduleMode::kDeviceOnly,
GroupScheduleMode::kHostPrecompute}; // // Methods // Options(): help(false), error(false), alignment(8), reference_check(true), profile_initialization(false), sort_problems(false), problem_count(5), iterations(20), cuda_streams(0), verbose(false), alpha(1), beta(), scheduler_modes({GroupScheduleMode::kDeviceOnly}) { } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; return; } cmd.get_cmd_line_argument("alignment", alignment, 8); cmd.get_cmd_line_argument("groups", problem_count, 5); cmd.get_cmd_line_argument("alpha", alpha, 1.0f); cmd.get_cmd_line_argument("beta", beta, 0.0f); cmd.get_cmd_line_argument("iterations", iterations, 20); cmd.get_cmd_line_argument("streams", cuda_streams, 0); cmd.get_cmd_line_argument("verbose", verbose, false); cmd.get_cmd_line_argument("reference-check", reference_check, true); cmd.get_cmd_line_argument("profile-initialization", profile_initialization, false); cmd.get_cmd_line_argument("sort-problems", sort_problems, false); cmd.get_cmd_line_argument("benchmark", benchmark_path); std::vector<std::string> scheduler_mode_strs; cmd.get_cmd_line_arguments("scheduler-modes", scheduler_mode_strs); if (!scheduler_mode_strs.empty()) { scheduler_modes.clear(); if (scheduler_mode_strs.size() == 1 && scheduler_mode_strs[0] == "all") { scheduler_modes = all_scheduler_modes; } else { for (std::string precomp_str : scheduler_mode_strs) { auto it = str_to_scheduler_mode.find(precomp_str); if (it != str_to_scheduler_mode.end()) { scheduler_modes.push_back(it->second); } else if (precomp_str == "all") { std::cerr << "Flag --scheduler-modes=all must not contain other scheduler modes in list." << std::endl; error = true; return; } else { std::cerr << "Unrecognized scheduler mode '" << precomp_str << "'" << std::endl; error = true; return; } } } } std::string output_path; cmd.get_cmd_line_argument("tag", output_tag); cmd.get_cmd_line_argument("output_file", output_path); if (!output_path.empty()) { std::ios_base::openmode open_mode = std::ios_base::out; std::ifstream input_file(output_path.c_str()); if (input_file.good()) { open_mode = std::ios_base::app; input_file.close(); } output_file.open(output_path.c_str(), open_mode); if (output_file.good() && open_mode != std::ios_base::app) { output_file << "Tag,Provider,Kind,Groups,Runtime,GFLOPs\n"; } } // Decide how to initialize the problems if (!benchmark_path.empty()) { if (!benchmark_problems()) { error = true; problem_sizes.clear(); return; } } else { randomize_problems(cmd); } } void randomize_problems(cutlass::CommandLine &cmd) { // // For now, randomly choose the problem sizes. // int cmd_line_m = -1; int cmd_line_n = -1; int cmd_line_k = -1; cmd.get_cmd_line_argument("m", cmd_line_m); cmd.get_cmd_line_argument("n", cmd_line_n); cmd.get_cmd_line_argument("k", cmd_line_k); // SYR2K is defined via only N and K. if (cmd_line_m != -1) { std::cerr << "Parameter M is ignored for SYR2K\n"; error = true; return; } problem_sizes.reserve(problem_count); for (int i = 0; i < problem_count; ++i) { int n = cmd_line_n; int k = cmd_line_k; if (n < 1) { n = alignment * ((rand() % 256) + 1); } if (k < 1) { k = alignment * ((rand() % 256) + 1); } // SYR2K is defined only in terms of N and K. Replicate N into // the SYR2K-N dimension. 
cutlass::gemm::GemmCoord problem(n, n, k); problem_sizes.push_back(problem); } } /// Load a benchmark bool benchmark_problems() { std::ifstream file(benchmark_path); if (!file.good()) { return false; } while (file.good()) { int idx = -1; std::string extent_str; file >> idx >> extent_str; if (idx < 0 || extent_str.empty()) { break; } cutlass::gemm::GemmCoord extent; std::vector<std::string> tokens; cutlass::CommandLine::tokenize(tokens, extent_str, 'x'); for (int i = 0; i < int(tokens.size()); ++i) { int x = std::atoi(tokens.at(i).c_str()); // round up if (x % alignment) { x += (alignment - (x % alignment)); } extent.at(i) = x; } if (extent.product()) { problem_sizes.push_back(extent); } } problem_count = int(problem_sizes.size()); return true; } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "38_syr2k_grouped\n\n" << " This example profiles the performance of a 'grouped' SYR2K kernel. This example closely follows 24_gemm_grouped\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --benchmark=<str> Executes a benchmark problem size.\n" << " --output_file=<str> Path to a CSV file to output results. If it exists already, results are appended.\n" << " --tag=<str> String tag to prepend to the CSV file.\n" << " --groups=<int> Number of individual SYR2K problems (default: --groups=15)\n" << " --m=<int> Sets the M dimension for all groups. Otherwise, it is selected randomly\n" << " --n=<int> Sets the N dimension for all groups. Otherwise, it is selected randomly\n" << " --k=<int> Sets the K dimension for all groups. Otherwise, it is selected randomly\n" << " --alpha=<f32> Epilogue scalar alpha (real part)\n" << " --beta=<f32> Epilogue scalar beta (real part)\n" << " --scheduler-modes=<str> List of scheduler modes to be profile for grouped GEMM scheduler (default: --scheduler_modes=kDeviceOnly)\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --reference-check=<bool> If true, performs reference check.\n" << " --verbose=<bool> If true, prints problem sizes and batching structure.\n" << " --profile-initialization=<bool> If true, profiles the device-level kernel's initialization.\n" << " --sort-problems=<bool> If true, sorts problem sizes in descending order of SYR2K-K dimension.\n"; out << "\n\nExamples:\n\n" << "# Runs a grouped SYR2K with 100 random problem sizes\n" << "$ ./examples/38_syr2k_grouped/38_syr2k_grouped --groups=100\n\n" << "# Runs a grouped SYR2K with 100 random problem sizes (with K dimension equal to 1024)\n" << "$ ./examples/38_syr2k_grouped/38_syr2k_grouped --groups=100 --k=1024 --verbose=true\n\n" << "# Runs a grouped SYR2K that is equivalent to a batched SYR2K\n" << "$ ./examples/38_syr2k_grouped/38_syr2k_grouped --groups=100 --n=1024 --k=1024 --verbose=true\n\n" << "# Runs a grouped SYR2K with each different scheduler mode\n" << "$ ./examples/38_syr2k_grouped/38_syr2k_grouped --scheduler-modes=all\n\n" << "# Runs a grouped SYR2K with each different scheduler mode and profiles host-side initialization time\n" << "$ ./examples/38_syr2k_grouped/38_syr2k_grouped --scheduler-modes=all --profile-initialization=true\n\n" << "# Runs a grouped SYR2K problem given an externally supplied benchmark file. This is a text file in which\n" << "# Each line contains a unique group index and an MxNxK triple indicating problemsize. 
NOTE that the\n" << "# GEMM-M and GEMM-N dimensions must match.\n" << "#\n" << "# For example, assume the following are the contents of 'problems.txt'\n" << "#\n" << "# 0 256x256x520\n" << "# 1 264x264x1024\n" << "# 2 48x48x1024\n" << "#\n" << "$ ./examples/38_syr2k_grouped/38_syr2k_grouped --benchmark=problems.txt\n\n" << "# Execute Grouped SYR2K and profile with NSight\n" << "$ nv-nsight-cu-cli ./examples/38_syr2k_grouped/38_syr2k_grouped --n=256 --k=256 --verbose=true --iterations=1 --reference-check=false\n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of real-valued multiply-adds int64_t fmas = int64_t(); for (auto const & problem : problem_sizes) { fmas += problem.product(); } // SYR2K is defined as (A x BT) + (B x AT), so the number of FMAs is twice that in a GEMM fmas *= 2; // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Rank2K> class BaseTestbed { public: // // Type definitions // using ElementA = typename Rank2K::ElementA; using ElementB = typename Rank2K::ElementB; using ElementC = typename Rank2K::ElementC; using ElementAccumulator = typename Rank2K::ElementAccumulator; using EpilogueOutputOp = typename Rank2K::Rank2Kkernel::Epilogue::OutputOp; using ElementCompute = typename EpilogueOutputOp::ElementCompute; using LayoutA = typename Rank2K::LayoutA; using LayoutB = typename Rank2K::LayoutB; using LayoutC = typename Rank2K::LayoutC; using MatrixCoord = typename LayoutC::TensorCoord; // // Data members // Options & options; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint32_t seed; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device; std::vector<int64_t> offset_A; std::vector<int64_t> offset_B; std::vector<int64_t> offset_C; std::vector<int64_t> offset_D; std::vector<int64_t> lda_host; std::vector<int64_t> ldb_host; std::vector<int64_t> ldc_host; std::vector<int64_t> ldd_host; cutlass::DeviceAllocation<int64_t> lda; cutlass::DeviceAllocation<int64_t> ldb; cutlass::DeviceAllocation<int64_t> ldc; cutlass::DeviceAllocation<int64_t> ldd; cutlass::DeviceAllocation<ElementA> block_A; cutlass::DeviceAllocation<ElementB> block_B; cutlass::DeviceAllocation<ElementC> block_C; cutlass::DeviceAllocation<ElementC> block_D; cutlass::DeviceAllocation<ElementA *> ptr_A; cutlass::DeviceAllocation<ElementB *> ptr_B; cutlass::DeviceAllocation<ElementC *> ptr_C; cutlass::DeviceAllocation<ElementC *> ptr_D; BaseTestbed( Options &options_, cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint32_t seed_ = 3080 ): options(options_), init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } int problem_count() const { return options.problem_count; } /// Helper to initialize a tensor view template <typename Element> void initialize_tensor( Element *ptr, size_t capacity, cutlass::Distribution::Kind dist_kind, uint32_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { Element scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = 
-2; } else if (bits_output == 16) { if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } } else { scope_max = 8; scope_min = -8; } cutlass::reference::device::BlockFillRandomUniform( ptr, capacity, seed, scope_max, scope_min, 0); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::device::BlockFillRandomGaussian( ptr, capacity, seed, Element(), Element(0.5f)); } else if (dist_kind == cutlass::Distribution::Sequential) { // Fill with increasing elements cutlass::reference::device::BlockFillSequential( ptr, capacity, Element(1), Element()); } else { // Fill with all 1s cutlass::reference::device::BlockFillSequential( ptr, capacity, Element(), Element(1)); } } /// Allocates device-side data void allocate() { int64_t total_elements_A = 0; int64_t total_elements_B = 0; int64_t total_elements_C = 0; int64_t total_elements_D = 0; lda_host.resize(problem_count()); ldb_host.resize(problem_count()); ldc_host.resize(problem_count()); ldd_host.resize(problem_count()); for (int32_t i = 0; i < problem_count(); ++i) { auto problem = options.problem_sizes.at(i); lda_host.at(i) = LayoutA::packed({problem.n(), problem.k()}).stride(0); ldb_host.at(i) = LayoutB::packed({problem.n(), problem.k()}).stride(0); ldc_host.at(i) = LayoutC::packed({problem.n(), problem.n()}).stride(0); ldd_host.at(i) = LayoutC::packed({problem.n(), problem.n()}).stride(0); offset_A.push_back(total_elements_A); offset_B.push_back(total_elements_B); offset_C.push_back(total_elements_C); offset_D.push_back(total_elements_D); int64_t elements_A = problem.n() * problem.k(); int64_t elements_B = problem.n() * problem.k(); int64_t elements_C = problem.n() * problem.n(); int64_t elements_D = problem.n() * problem.n(); total_elements_A += elements_A; total_elements_B += elements_B; total_elements_C += elements_C; total_elements_D += elements_D; } lda.reset(problem_count()); ldb.reset(problem_count()); ldc.reset(problem_count()); ldd.reset(problem_count()); block_A.reset(total_elements_A); block_B.reset(total_elements_B); block_C.reset(total_elements_C); block_D.reset(total_elements_D); } /// Initializes device-side data void initialize() { problem_sizes_device.reset(problem_count()); problem_sizes_device.copy_from_host(options.problem_sizes.data()); lda.copy_from_host(lda_host.data()); ldb.copy_from_host(ldb_host.data()); ldc.copy_from_host(ldc_host.data()); ldd.copy_from_host(ldd_host.data()); // // Assign pointers // std::vector<ElementA *> ptr_A_host(problem_count()); std::vector<ElementB *> ptr_B_host(problem_count()); std::vector<ElementC *> ptr_C_host(problem_count()); std::vector<ElementC *> ptr_D_host(problem_count()); for (int32_t i = 0; i < problem_count(); ++i) { ptr_A_host.at(i) = block_A.get() + offset_A.at(i); ptr_B_host.at(i) = block_B.get() + offset_B.at(i); ptr_C_host.at(i) = block_C.get() + offset_C.at(i); ptr_D_host.at(i) = block_D.get() + offset_D.at(i); } ptr_A.reset(problem_count()); ptr_A.copy_from_host(ptr_A_host.data()); ptr_B.reset(problem_count()); ptr_B.copy_from_host(ptr_B_host.data()); ptr_C.reset(problem_count()); ptr_C.copy_from_host(ptr_C_host.data()); ptr_D.reset(problem_count()); ptr_D.copy_from_host(ptr_D_host.data()); // // Initialize the problems of the workspace // initialize_tensor(block_A.get(), block_A.size(), init_A, seed * 2021); initialize_tensor(block_B.get(), block_B.size(), init_B, seed * 2022); initialize_tensor(block_C.get(), block_C.size(), init_C, seed * 2023); 
cutlass::reference::device::BlockFillSequential( block_D.get(), block_D.size(), ElementC(), ElementC()); } /// Verifies the result is a SYR2K bool verify() { bool passed = true; for (int32_t i = 0; i < problem_count(); ++i) { cutlass::gemm::GemmCoord problem = options.problem_sizes.at(i); LayoutA layout_A(lda_host.at(i)); LayoutB layout_B(ldb_host.at(i)); LayoutC layout_C(ldc_host.at(i)); LayoutC layout_D(ldd_host.at(i)); cutlass::HostTensor<ElementA, LayoutA> host_A( typename LayoutA::TensorCoord(problem.n(), problem.k()), /*device_backed=*/false); cutlass::HostTensor<ElementB, LayoutB> host_B( typename LayoutB::TensorCoord(problem.n(), problem.k()), /*device_backed=*/false); cutlass::HostTensor<ElementC, LayoutC> host_C( typename LayoutC::TensorCoord(problem.n(), problem.n()), /*device_backed=*/false); cutlass::HostTensor<ElementC, LayoutC> host_D( typename LayoutC::TensorCoord(problem.n(), problem.n()), /*device_backed=*/false); cutlass::device_memory::copy_to_host(host_A.host_data(), block_A.get() + offset_A.at(i), problem.n() * problem.k()); cutlass::device_memory::copy_to_host(host_B.host_data(), block_B.get() + offset_B.at(i), problem.n() * problem.k()); cutlass::device_memory::copy_to_host(host_C.host_data(), block_C.get() + offset_C.at(i), problem.n() * problem.n()); cutlass::reference::host::BlockFillSequential( host_D.host_data(), problem.n() * problem.n(), ElementC(), ElementC()); MatrixCoord extent_C{problem.n(), problem.n()}; // Reference Rank2K cutlass::reference::host::Rank2KComplex< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementC, ElementAccumulator >( problem, (double)options.alpha, host_A.host_view(), Rank2K::kTransformA, host_B.host_view(), Rank2K::kTransformB, (double)options.beta, host_C.host_view(), host_D.host_view(), ElementAccumulator(0), Rank2K::kFillModeC, Rank2K::kBlasMode ); // Copy to host memory std::vector<ElementC> matrix_D(layout_D.capacity(extent_C)); cutlass::device_memory::copy_to_host(matrix_D.data(), block_D.get() + offset_D.at(i), matrix_D.size()); cutlass::TensorView<ElementC, LayoutC> view_D(matrix_D.data(), layout_D, extent_C); cutlass::TensorView<ElementC, LayoutC> view_Ref = host_D.host_view(); // Reference check passed = cutlass::reference::host::TensorEquals(view_D, view_Ref); if (!passed) { std::cerr << "\n***\nError - problem " << i << " failed the QA check\n***\n" << std::endl; return passed; } } return passed; } }; template <typename Rank2K> class TestbedConventional : BaseTestbed<Rank2K> { public: TestbedConventional( Options &options_, cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint32_t seed_ = 3080 ): BaseTestbed<Rank2K>(options_, init_A_, init_B_, init_C_, seed_) {} /// Verbose printing of problem sizes void print_problem_sizes() { // Print groups std::cout << this->problem_count() << " groups:\n"; int32_t idx = 0; int64_t total_tiles = 0; for (auto const & problem : this->options.problem_sizes) { int tiles = ((problem.m() + Rank2K::ThreadblockShape::kM - 1) / Rank2K::ThreadblockShape::kM) * ((problem.n() + Rank2K::ThreadblockShape::kN - 1) / Rank2K::ThreadblockShape::kN); total_tiles += tiles; std::cout << " [" << idx << "]: " << problem.m() << "-by-" << problem.n() << "-by-" << problem.k() << " (" << tiles << " threadblock tiles)" << "\n"; ++idx; } std::cout << std::endl; } /// Executes a conventional SYR2K kernel. 
Result profile() { std::cout << "Conventional Rank2K:\n" << "====================================================" << std::endl; Result result; result.passed = false; // Initialize the problem this->allocate(); this->initialize(); if (this->options.verbose) { print_problem_sizes(); } // // Create CUDA streams to maximize concurrency of SYR2K kernels // int32_t effective_streams = (this->options.cuda_streams ? this->options.cuda_streams : 1); std::vector<cudaStream_t> cuda_streams; char const *provider = "CUTLASS"; // // Warmup run // if (this->options.cuda_streams) { for (int i = 0; i < this->options.cuda_streams; ++i) { cudaStream_t stream; result.error = cudaStreamCreate(&stream); if (result.error != cudaSuccess) { std::cerr << "Failed to create CUDA stream." << std::endl; return result; } cuda_streams.push_back(stream); } } else { cuda_streams.push_back(nullptr); } // Use 'D' for the in/out workspace this->block_D.copy_from_device(this->block_C.get()); for (size_t i = 0; i < this->options.problem_sizes.size(); ++i) { cutlass::gemm::GemmCoord const & problem = this->options.problem_sizes[i]; int32_t batch_count = 1; int64_t lda = this->lda_host.at(i); int64_t ldb = this->ldb_host.at(i); int64_t ldc = this->ldc_host.at(i); typename Rank2K::ElementA* ptrA = this->block_A.get() + this->offset_A.at(i); typename Rank2K::ElementB* ptrB = this->block_B.get() + this->offset_B.at(i); typename Rank2K::ElementC* ptrC = this->block_C.get() + this->offset_C.at(i); typename Rank2K::ElementC* ptrD = this->block_D.get() + this->offset_D.at(i); // // Initialize the CUTLASS SYR2K operator // // Configure the SYR2K arguments typename Rank2K::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta); typename Rank2K::Arguments arguments{ cutlass::gemm::GemmUniversalMode::kGemm, problem, batch_count, epilogue_op, (void const *)ptrA, (void const *)ptrB, (void const *)ptrC, (void *)ptrD, int64_t(), int64_t(), int64_t(), int64_t(), int64_t(lda), int64_t(ldb), int64_t(ldc), int64_t(ldc) }; Rank2K rank2k_op; cutlass::Status status = rank2k_op.initialize(arguments); if (status != cutlass::Status::kSuccess) { std::cerr << "CUTLASS error on line " << __LINE__ << std::endl; return result; } status = rank2k_op(); if (status != cutlass::Status::kSuccess) { std::cerr << "CUTLASS error on line " << __LINE__ << std::endl; return result; } } // // Wait for completion // result.error = cudaDeviceSynchronize(); if (result.error != cudaSuccess) { std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error); return result; } // // Construct events // cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return -1; } } // // Wait for completion // result.error = cudaDeviceSynchronize(); if (result.error != cudaSuccess) { std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error); return result; } // Record an event at the start of a series of SYR2K operations result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // // Run profiling loop // size_t last_stream_idx = 0; for (int iter = 0; iter < this->options.iterations; ++iter) { for (size_t i = 0; i < this->options.problem_sizes.size(); ++i) { cutlass::gemm::GemmCoord const & problem = this->options.problem_sizes[i]; int32_t 
batch_count = 1; int64_t lda = this->lda_host.at(i); int64_t ldb = this->ldb_host.at(i); int64_t ldc = this->ldc_host.at(i); typename Rank2K::ElementA* ptrA = this->block_A.get() + this->offset_A.at(i); typename Rank2K::ElementB* ptrB = this->block_B.get() + this->offset_B.at(i); typename Rank2K::ElementC* ptrC = this->block_C.get() + this->offset_C.at(i); typename Rank2K::ElementC* ptrD = this->block_D.get() + this->offset_D.at(i); last_stream_idx = (i % effective_streams); // // Initialize the CUTLASS SYR2K operator // // Configure the SYR2K arguments typename Rank2K::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta); typename Rank2K::Arguments arguments{ cutlass::gemm::GemmUniversalMode::kGemm, problem, batch_count, epilogue_op, (void const *)ptrA, (void const *)ptrB, (void const *)ptrC, (void *)ptrD, int64_t(), int64_t(), int64_t(), int64_t(), int64_t(lda), int64_t(ldb), int64_t(ldc), int64_t(ldc) }; Rank2K rank2k_op; cutlass::Status status = rank2k_op.initialize(arguments); if (status != cutlass::Status::kSuccess) { std::cerr << "CUTLASS error on line " << __LINE__ << std::endl; return result; } status = rank2k_op(cuda_streams[last_stream_idx]); if (status != cutlass::Status::kSuccess) { std::cerr << "CUTLASS error on line " << __LINE__ << std::endl; return result; } } } // // Stop profiling loop // // Record an event when the SYR2K operations have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // // Wait for work to be completed // result.error = cudaDeviceSynchronize(); if (result.error != cudaSuccess) { std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error); return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Compute average runtime and GFLOPs. 
result.runtime_ms = double(runtime_ms) / double(this->options.iterations); result.gflops = this->options.gflops(result.runtime_ms / 1000.0); // // Cleanup // for (auto event : events) { (void)cudaEventDestroy(event); } for (auto stream : cuda_streams) { if (stream) { (void)cudaStreamDestroy(stream); } } std::cout << " " << this->options.problem_sizes.size() << " conventional Rank2Ks launched" << std::endl; std::cout << std::endl; std::cout << " " << "Conventional Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout << " " << "Conventional GFLOPS: " << result.gflops << std::endl; if (this->options.output_file.good()) { this->options.output_file << this->options.output_tag << "," << provider << ",conventional," << this->problem_count() << "," << result.runtime_ms << "," << result.gflops << std::endl; } result.passed = true; return result; } }; template <typename Rank2K_, cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode_> class TestbedGrouped : BaseTestbed<Rank2K_> { public: TestbedGrouped( Options &options_, cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint32_t seed_ = 3080 ) : BaseTestbed<Rank2K_>(options_, init_A_, init_B_, init_C_, seed_) {} // Redefine Rank2K with different GroupScheduleMode_ using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< typename Rank2K_::ElementA, typename Rank2K_::LayoutA, Rank2K_::kTransformA, Rank2K_::kAlignmentA, typename Rank2K_::ElementB, typename Rank2K_::LayoutB, Rank2K_::kTransformB, Rank2K_::kAlignmentB, typename Rank2K_::ElementC, typename Rank2K_::LayoutC, Rank2K_::kFillModeC, typename Rank2K_::ElementAccumulator, typename Rank2K_::OperatorClass, typename Rank2K_::ArchTag, typename Rank2K_::ThreadblockShape, typename Rank2K_::WarpShape, typename Rank2K_::InstructionShape, typename Rank2K_::EpilogueOutputOp, typename Rank2K_::ThreadblockSwizzle, Rank2K_::kStages, typename Rank2K_::Operator::ArchMmaOperator::Operator, Rank2K_::kBlasMode, GroupScheduleMode_>::Rank2Kkernel; using Rank2K = cutlass::gemm::device::Rank2KGrouped<Rank2Kkernel>; /// Verbose printing of problem sizes void print_problem_sizes() { // Print groups std::cout << this->problem_count() << " groups:\n"; int32_t idx = 0; int64_t total_tiles = 0; for (auto const & problem : this->options.problem_sizes) { int tiles = Rank2K::problem_tile_count(problem); total_tiles += tiles; std::cout << " [" << idx << "]: " << problem.m() << "-by-" << problem.n() << "-by-" << problem.k() << " (" << tiles << " threadblock tiles)" << "\n"; ++idx; } std::cout << std::endl; } /// Sort problems in descending order of problem-K dimension void sort_problems() { Rank2K::sort_problems(this->options.problem_count, this->options.problem_sizes.data(), this->lda_host.data(), this->ldb_host.data(), this->ldc_host.data(), this->ldd_host.data(), this->offset_A.data(), this->offset_B.data(), this->offset_C.data(), this->offset_D.data()); } /// Executes a grouped kernel and measures runtime. 
Result profile() { std::string sched_mode = this->options.scheduler_mode_to_str.find(GroupScheduleMode_)->second; std::cout << std::endl; std::cout << "Grouped Rank2K (CUTLASS) with mode " << sched_mode << ":\n" << "====================================================" << std::endl; Result result; int threadblock_count = Rank2K::sufficient(this->options.problem_sizes.data(), this->options.problem_count); // Early exit if (!threadblock_count) { std::cout << "Active CUDA device lacks hardware resources to run CUTLASS Grouped SYR2K kernel." << std::endl; return result; } result.passed = false; // Initialize the problem this->allocate(); if (this->options.sort_problems) { sort_problems(); } this->initialize(); if (this->options.verbose) { print_problem_sizes(); } // Configure the Rank2K arguments typename Rank2K::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta); // Configure Rank2K arguments typename Rank2K::Arguments args( cutlass::gemm::GemmUniversalMode::kGemm, this->problem_sizes_device.get(), this->problem_count(), threadblock_count, epilogue_op, this->ptr_A.get(), this->ptr_B.get(), this->ptr_C.get(), this->ptr_D.get(), this->lda.get(), this->ldb.get(), this->ldc.get(), this->ldd.get(), this->options.problem_sizes.data() ); // Initialize the Rank2K object Rank2K rank2k{}; size_t workspace_size = rank2k.get_workspace_size(args); cutlass::DeviceAllocation<uint8_t> workspace(workspace_size); result.status = rank2k.initialize(args, workspace.get()); if (result.status != cutlass::Status::kSuccess) { std::cerr << "Failed to initialize CUTLASS Grouped Rank2K kernel." << std::endl; return result; } // Run the grouped Rank2K object result.status = rank2k.run(); if (result.status != cutlass::Status::kSuccess) { std::cerr << "Failed to run CUTLASS Grouped Rank2K kernel." << std::endl; return result; } // Wait for completion result.error = cudaDeviceSynchronize(); if (result.error != cudaSuccess) { std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error); return result; } // // Verify correctness // result.passed = true; if (this->options.reference_check) { result.passed = this->verify(); } // // Warm-up run of the grouped Rank2K object // result.status = rank2k.run(); if (result.status != cutlass::Status::kSuccess) { std::cerr << "Failed to run CUTLASS Grouped Rank2K kernel." << std::endl; return result; } // // Construct events // cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return -1; } } // Record an event at the start of a series of SYR2K operations result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // // Run profiling loop // for (int iter = 0; iter < this->options.iterations; ++iter) { rank2k(); } // // Stop profiling loop // // Record an event when the Rank2K operations have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. 
result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Compute average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(this->options.iterations); result.gflops = this->options.gflops(result.runtime_ms / 1000.0); // // Cleanup // for (auto event : events) { (void)cudaEventDestroy(event); } // Optionally profile initialization if (this->options.profile_initialization) { // Warm up rank2k.initialize(args, workspace.get()); auto start_time = std::chrono::high_resolution_clock::now(); for (int32_t i = 0; i < this->options.iterations; ++i) { rank2k.initialize(args, workspace.get()); } auto end_time = std::chrono::high_resolution_clock::now(); std::chrono::duration<double, std::milli> duration = end_time - start_time; duration /= double(this->options.iterations); result.initialization_time_ms = duration.count(); } int64_t total_tiles = Rank2K::group_tile_count(args); std::cout << " " << total_tiles << " total threadblock tiles." << std::endl; std::cout << std::endl; std::cout << " " << "Grouped Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout << " " << "Grouped GFLOPs: " << result.gflops << std::endl; if (this->options.profile_initialization) { std::cout << " " << "Init Runtime: " << result.initialization_time_ms << " ms" << std::endl; } if (this->options.output_file.good()) { this->options.output_file << this->options.output_tag << ",CUTLASS,grouped-" << sched_mode << "," << this->problem_count() << "," << result.runtime_ms << "," << result.gflops << std::endl; } std::cout << "\nPassed\n"; return result; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) { // // This example requires an NVIDIA Ampere-architecture GPU. // std::cout << "CUTLASS's Grouped Rank2K example requires a GPU of NVIDIA's Ampere Architecture or " << "later (compute capability 80 or greater).\n"; return 0; } // // Parse options // Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.error) { std::cerr << "Aborting execution." 
<< std::endl; return -1; } // // Define the Grouped and Conventional Rank2K types // using ElementA = double; using ElementB = double; using ElementOutput = double; using ElementAccumulator = double; const cutlass::FillMode kFillModeC = cutlass::FillMode::kLower; const int kAlignmentA = 1; const int kAlignmentB = 1; const cutlass::ComplexTransform kTransformA = cutlass::ComplexTransform::kNone; const cutlass::ComplexTransform kTransformB = cutlass::ComplexTransform::kNone; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using OperatorClass = cutlass::arch::OpClassTensorOp; using ArchTag = cutlass::arch::Sm80; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, 1, ElementAccumulator, ElementAccumulator>; // NOTE: Threadblock swizzling is currently not supported by CUTLASS's grouped kernels. // This parameter is passed in at present to match the APIs of other kernels. The parameter // is unused within the kernel. using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; const int kStages = 4; const bool kSplitKSerial = false; using Operator = cutlass::arch::OpMultiplyAdd; const cutlass::BlasMode kBlasMode = cutlass::BlasMode::kSymmetric; // Define a grouped Rank2K kernel with all template parameters set except // for scheduling mode. This will be used as the template for all scheduling // modes executed. using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped< ElementA, LayoutA, kTransformA, kAlignmentA, ElementB, LayoutB, kTransformB, kAlignmentB, ElementOutput, LayoutC, kFillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, kStages, Operator, kBlasMode>::Rank2Kkernel; using Rank2KGrouped = cutlass::gemm::device::Rank2KGrouped<Rank2Kkernel>; // Rank2k operator using Rank2KConventional = cutlass::gemm::device::Rank2K< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, kFillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, kStages, kAlignmentA, kAlignmentB, kSplitKSerial, Operator, kTransformA, kTransformB, kBlasMode >; // // Profile it // TestbedConventional<Rank2KConventional> testbed(options); Result result = testbed.profile(); if (!result.passed) { std::cout << "Profiling CUTLASS conventional Rank2K has failed.\n"; std::cout << "\nFailed\n"; return -1; } using GroupScheduleMode = cutlass::gemm::kernel::GroupScheduleMode; for (GroupScheduleMode mode : options.scheduler_modes) { Result result; switch (mode) { case GroupScheduleMode::kDeviceOnly: { TestbedGrouped<Rank2KGrouped, GroupScheduleMode::kDeviceOnly> runner(options); result = runner.profile(); break; } case GroupScheduleMode::kHostPrecompute: { TestbedGrouped<Rank2KGrouped, GroupScheduleMode::kHostPrecompute> runner(options); result = runner.profile(); break; } } if (result.error != cudaSuccess) { return 1; } // Override verbose flag to avoid printing duplicate information for each scheduling mode options.verbose = false; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
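///////////////////////////////////////////////////////////////////////////////////////////////////

//
// Editor's note: the helper below is an illustrative sketch added for exposition; it is not part of
// the original example and is not called anywhere. It writes a benchmark file in the format that
// --benchmark=<path> expects: one group index and one NxNxK extent per line, with the first two
// extents equal because SYR2K is defined only in terms of N and K. The file name and problem sizes
// are hypothetical and simply reuse the values shown in the usage text above.
//
bool WriteExampleProblemFile() {
  std::ofstream out("problems.txt");  // <fstream> is already included at the top of this file

  out << "0 256x256x520\n"
      << "1 264x264x1024\n"
      << "2 48x48x1024\n";

  return out.good();
}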
examples/38_syr2k_grouped/syr2k_grouped.cu/0
{ "file_path": "examples/38_syr2k_grouped/syr2k_grouped.cu", "repo_id": "examples", "token_count": 19343 }
5
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are accommodated by exchanging A and B operands and assuming transposed layouts. Partial specializations here choose 'device::GemmTransposed' to implement this functionality. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/complex.h" #include "cutlass/layout/matrix.h" #include "cutlass/numeric_types.h" #include "fmha_grouped.h" #include "gemm_kernel_utils.h" #include "gemm/custom_mma.h" #include "gemm/find_default_mma.h" #include "gemm/mma_from_smem.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < // The datatype of Q/K/V typename scalar_t_, // Architecture we are targeting (eg `cutlass::arch::Sm80`) typename ArchTag_, // If Q/K/V are correctly aligned in memory and we can run a fast kernel bool isAligned_, int kQueriesPerBlock, int kKeysPerBlock, int kMaxK = (int)cutlass::platform::numeric_limits<uint32_t>::max(), GroupScheduleMode GroupScheduleMode_ = GroupScheduleMode::kDeviceOnly > struct DefaultFMHAGrouped { using scalar_t = scalar_t_; using accum_t = float; using output_t = scalar_t; // Accumulator between 2 iterations // Using `accum_t` improves perf on f16 at the cost of // numerical errors using output_accum_t = accum_t; using ArchTag = ArchTag_; static bool const kIsAligned = isAligned_; static bool const kSingleValueIteration = kMaxK <= kKeysPerBlock; static constexpr bool kIsHalf = cutlass::sizeof_bits<scalar_t>::value == 16; static int const kWarpSize = 32; static int const kNumWarpsPerBlock = kQueriesPerBlock * kKeysPerBlock / (kWarpSize * kWarpSize); struct MM0 { /* In this first matmul, we compute a block of `Q @ K.T`. While the calculation result is still hot in registers, we update `mi`, `m_prime`, `s_prime` in shared-memory, and then store this value into a shared-memory ("AccumulatorSharedStorage") that is used later as operand A for the second matmul (see MM1) */ using GemmType = gemm_kernel_utils::DefaultGemmType<ArchTag, scalar_t>; using OpClass = typename GemmType::OpClass; using ElementA = scalar_t; using ElementB = scalar_t; using ElementC = scalar_t; using ElementAccumulator = accum_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using DefaultConfig = typename cutlass::gemm::device::DefaultGemmConfiguration< OpClass, ArchTag, ElementA, ElementB, ElementC, ElementAccumulator >; static int const kAlignmentA = kIsAligned ? DefaultConfig::kAlignmentA : GemmType::kMinimumAlignment; static int const kAlignmentB = kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment; using ThreadblockShape = cutlass::gemm::GemmShape<kQueriesPerBlock, kKeysPerBlock, GemmType::ThreadK>; using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>; using InstructionShape = typename GemmType::InstructionShape; static int const kStages = DefaultConfig::kStages; using Operator = typename GemmType::Operator; using DefaultMma = typename cutlass::gemm::threadblock::FindDefaultMma< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementAccumulator, LayoutC, OpClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, ArchTag::kMinComputeCapability >= 80 && kIsHalf ? 
4 : DefaultConfig::kStages, Operator >::DefaultMma; using MmaCore = typename DefaultMma::MmaCore; using IteratorA = typename DefaultMma::IteratorA; using IteratorB = typename DefaultMma::IteratorB; using DefaultThreadblockMma = typename DefaultMma::ThreadblockMma; using Mma = typename cutlass::platform::conditional< kSingleValueIteration, typename MakeCustomMma<DefaultThreadblockMma, kMaxK>::Mma, DefaultThreadblockMma>::type; using AccumLambdaIterator = typename DefaultMmaAccumLambdaIterator< typename Mma::Operator::IteratorC, ElementAccumulator, kWarpSize>::Iterator; static_assert(MmaCore::WarpCount::kCount == kNumWarpsPerBlock, ""); // Epilogue to store to shared-memory in a format that we can use later for // the second matmul using B2bGemm = typename cutlass::gemm::threadblock::B2bGemm< typename Mma::Operator::IteratorC, typename Mma::Operator, scalar_t, WarpShape, ThreadblockShape>; using AccumulatorSharedStorage = typename B2bGemm::AccumulatorSharedStorage; }; struct MM1 { /* Second matmul: perform `attn @ V` where `attn` is the attention (not normalized) and stored in shared memory */ using GemmType = typename MM0::GemmType; using OpClass = typename GemmType::OpClass; using ElementA = scalar_t; using ElementB = scalar_t; using ElementC = output_accum_t; using ElementAccumulator = accum_t; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::RowMajor; using DefaultConfig = typename cutlass::gemm::device::DefaultGemmConfiguration< OpClass, ArchTag, ElementA, ElementB, ElementC, ElementAccumulator >; static int const kAlignmentA = DefaultConfig::kAlignmentA; static int const kAlignmentB = kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment; using ThreadblockShape = typename MM0::ThreadblockShape; using WarpShape = typename MM0::WarpShape; using InstructionShape = typename MM0::InstructionShape; using EpilogueOutputOp = typename DefaultConfig::EpilogueOutputOp; static int const kStages = DefaultConfig::kStages; using Operator = typename GemmType::Operator; using ThreadblockSwizzle = void; // Swizzling is unused static bool const kSplitKSerial = false; using DefaultGemm = cutlass::gemm::kernel::DefaultGemm< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, LayoutC, ElementAccumulator, OpClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, ArchTag::kMinComputeCapability >= 80 && kIsHalf ? 
4 : DefaultConfig::kStages, kSplitKSerial, Operator>; using WarpIteratorA = typename cutlass::gemm::threadblock:: DefaultWarpIteratorAFromSharedMemory< typename DefaultGemm::Mma::Policy::Operator::Shape, // WarpShape typename DefaultGemm::Mma::Policy::Operator::InstructionShape, typename DefaultGemm::Mma::Policy::Operator::IteratorA, typename DefaultGemm::Mma::Policy>::WarpIterator; using DefaultMmaFromSmem = typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory< typename DefaultGemm::Mma, MM0::AccumulatorSharedStorage::Shape::kN, // kMaxK WarpIteratorA, false>; // kScaleOperandA using Mma = typename DefaultMmaFromSmem::Mma; using IteratorB = typename Mma::IteratorB; using WarpCount = typename Mma::WarpCount; static_assert(WarpCount::kCount == kNumWarpsPerBlock, ""); using DefaultEpilogue = typename DefaultGemm::Epilogue; using OutputTileIterator = typename cutlass::epilogue::threadblock::PredicatedTileIterator< typename DefaultEpilogue::OutputTileIterator::ThreadMap, output_t>; using OutputTileIteratorAccum = typename cutlass::epilogue::threadblock::PredicatedTileIterator< typename DefaultEpilogue::OutputTileIterator::ThreadMap, output_accum_t>; }; /// Define the kernel in terms of the default kernel using FMHAKernel = kernel::FMHAGrouped< MM0, MM1, scalar_t, accum_t, output_t, output_accum_t, kSingleValueIteration, GroupScheduleMode_ >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
examples/41_fused_multi_head_attention/default_fmha_grouped.h/0
{ "file_path": "examples/41_fused_multi_head_attention/default_fmha_grouped.h", "repo_id": "examples", "token_count": 3924 }
6
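DefaultFMHAGrouped above is purely a type-level factory: it assembles the two fused GEMM stages (MM0 computes Q @ K^T and stages the result in shared memory, MM1 consumes it for attn @ V) and exposes the resulting device kernel as FMHAKernel. The sketch below shows one possible instantiation; the half-precision element type, SM80 target, and 64x64 tile are illustrative choices for this sketch rather than values prescribed by the example, and the include path of the example header is assumed.

// Illustrative instantiation of the grouped FMHA kernel trait defined above.
#include "cutlass/arch/arch.h"
#include "cutlass/numeric_types.h"
#include "default_fmha_grouped.h"   // example header shown above; path assumed

using FMHAConfig = cutlass::gemm::kernel::DefaultFMHAGrouped<
    cutlass::half_t,       // scalar_t: element type of Q/K/V
    cutlass::arch::Sm80,   // ArchTag
    true,                  // isAligned: Q/K/V satisfy the default alignment
    64,                    // kQueriesPerBlock
    64,                    // kKeysPerBlock
    64>;                   // kMaxK: bound on the head size

// kMaxK <= kKeysPerBlock, so one tile covers the whole row of V and the
// kernel skips the loop over value tiles.
static_assert(FMHAConfig::kSingleValueIteration, "expected single-value iteration");

// Device kernel type (GroupScheduleMode defaults to kDeviceOnly):
using GroupedFMHAKernel = FMHAConfig::FMHAKernel;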
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tools and utils to store a GEMM output in shmem, and to use that output as operandA for another GEMM back-to-back */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/threadblock/default_epilogue_simt.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/functional.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_conversion.h" #include "cutlass/numeric_types.h" #include "cutlass/platform/platform.h" #include "cutlass/transform/threadblock/vector_iterator.h" #include "../epilogue/epilogue_thread_apply_logsumexp.h" #include "../gemm/mma_accum_lambda_iterator.h" #include "../gemm_kernel_utils.h" #include "../iterators/default_warp_iterator_from_smem.h" #include "../iterators/make_residual_last.h" #include "../iterators/transpose_warp_iterator.h" #include "../iterators/warp_iterator_from_smem.h" #include "cutlass/epilogue/threadblock/epilogue_smem_accumulator.h" #include "cutlass/gemm/threadblock/mma_base.h" #include "cutlass/gemm/threadblock/mma_multistage.h" #include "cutlass/gemm/threadblock/mma_pipelined.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h" namespace cutlass { namespace gemm { namespace threadblock { /// Shared storage object needed by accumulator /// From 13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h template < typename Shape_, typename Element_, typename Layout_, typename Padding_> class AccumulatorSharedStorage { public: // // Type definitions // using Shape = Shape_; using Element = Element_; using Layout = Layout_; using Padding = Padding_; /// Tensor reference to the accumulator using TensorRefAccum = cutlass::TensorRef<Element, Layout>; /// Shape of the accumulator matrix in shared memory using ShapeAccum = cutlass:: MatrixShape<Shape::kM + Padding::kRow, Shape::kN + Padding::kColumn>; public: // // Data members // /// Buffer for accumulator cutlass::AlignedBuffer<Element, ShapeAccum::kCount> accum; public: // // Methods // /// Returns a layout object for the Accum matrix CUTLASS_DEVICE static Layout LayoutAccum() { return Layout::packed({ShapeAccum::kRow, ShapeAccum::kColumn}); } /// Returns a TensorRef to the Accumulator CUTLASS_HOST_DEVICE TensorRefAccum accum_ref() { return TensorRefAccum{accum.data(), LayoutAccum()}; } }; //////////////////////////////////////////////////////////////////////////////// // Taken from // https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h //////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, // Maximum K dimension - also the dimension of the shared-memory // holding `OperandA` int kMaxK_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Layout in shared-memory of operand A typename SmemLayoutA, /// Used for partial specialization typename Enable = bool> class MmaBaseFromSharedMemory { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; static constexpr int kMaxK = kMaxK_; ///< Policy describing tuning details using Policy = Policy_; // // Dependent types // /// Warp-level Mma using Operator = typename Policy::Operator; /// Shape describing the overall GEMM computed from shared memory /// by each warp. using WarpGemm = typename Policy::Operator::Shape; /// Shape describing the number of warps filling the CTA using WarpCount = GemmShape< Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>; using WarpCount1 = WarpCount; /// Number of warp-level GEMM oeprations static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK); static int const kWarpGemmIterations1 = kWarpGemmIterations; /// Number of stages static int const kStages = Stages; /// If this is true, we fill the entire shmem buffer at start /// and don't need to iterate through it in a circular fashion static bool const kSmemContainsEntireB = kMaxK <= Shape::kK * kStages; /// Tensor reference to the A operand using TensorRefA = TensorRef<typename Operator::ElementA, SmemLayoutA>; /// Tensor reference to the B operand using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>; // // Nested structs // /// Shared storage object needed by threadblock-scoped GEMM class SharedStorage { public: // // Type definitions // /// Shape of the B matrix operand in shared memory using ShapeB = MatrixShape< Shape::kK * kStages + Policy::SmemPaddingB::kRow, Shape::kN + Policy::SmemPaddingB::kColumn>; public: // // Data members // /// Buffer for B operand AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B; public: // // Methods // /// Returns a layout object for the B matrix CUTLASS_HOST_DEVICE static typename Operator::LayoutB LayoutB() { return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn}); } /// Returns a TensorRef to the B operand CUTLASS_HOST_DEVICE TensorRefB operand_B_ref() { return TensorRefB{operand_B.data(), LayoutB()}; } }; protected: // // Data members // // /// Iterator to load a warp-scoped tile of A operand from shared memory // typename Operator::IteratorA warp_tile_iterator_A_; /// Iterator to load a warp-scoped tile of B operand from shared memory typename Operator::IteratorB warp_tile_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE MmaBaseFromSharedMemory( ///< Shared storage needed for internal use by threadblock-scoped GEMM TensorRefB& b_tile, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : warp_tile_iterator_B_(b_tile, lane_idx) {} }; namespace { // has necessary trait compliance with WarpIteratorFromSmem but doesn't do // anything, can be default initialized, and uses fragment that takes up // (almost) no space. 
this warp iterator is selected at compile time when // elementwise on-the-fly scaling for operand A is disabled, in which case // operations related to loading scale factors for operand A get wiped out by // the compiler. template <typename TensorRef> class NoOpWarpIteratorScale { public: // in pipelined+multistage MMA implementations we keep an array of fragments. // if we aren't using scaling we don't want to waste registers on fragments // of scale elements, so ideally this would be sized 0. // Since arrays of zero-sized objects are not allowed, using size as 1. // The compiler will most likely wipe it out anyways. using Fragment = cutlass::Array<char, 1>; CUTLASS_HOST_DEVICE NoOpWarpIteratorScale() {} CUTLASS_HOST_DEVICE NoOpWarpIteratorScale(TensorRef const&, int) {} CUTLASS_HOST_DEVICE NoOpWarpIteratorScale& add_tile_offset( typename TensorRef::TensorCoord const&) { return *this; } CUTLASS_HOST_DEVICE NoOpWarpIteratorScale& operator++() { return *this; } CUTLASS_DEVICE void load(Fragment&) const {} }; // if scaling is enabled, performs fragment elementwise multiplication between // fragment and its scaling factor. template <typename Fragment, typename FragmentScale, bool ScalingEnabled> class FragmentElementwiseScaler; // specialization for scaling being enabled. template <typename Fragment, typename FragmentScale> class FragmentElementwiseScaler<Fragment, FragmentScale, true> { public: // cast scale_frag to correct type then apply elementwise to fragment CUTLASS_DEVICE static Fragment apply(Fragment frag, FragmentScale const& scale_frag) { Fragment converted_scale_frag = cutlass::NumericArrayConverter< typename Fragment::Element, typename FragmentScale::Element, FragmentScale::kElements>()(scale_frag); return cutlass::multiplies<Fragment>()(frag, converted_scale_frag); } }; // specialization for scaling being disabled. doesn't do anything and should // just get wiped out by the compiler. template <typename Fragment, typename FragmentScale> class FragmentElementwiseScaler<Fragment, FragmentScale, false> { public: CUTLASS_DEVICE static Fragment apply(Fragment frag, FragmentScale const&) { return frag; } }; } // namespace //////////////////////////////////////////////////////////////////////////////// // Taken from // https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined_smem_accumulator.h //////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, // BEGIN smem /// Iterates over the intermediate accumulator tile in shared memory typename WarpIteratorA_, /// whether or not to perform elementwise multiplication of A // by another matrix (A_scale) that is also kept in shared memory prior // to matmul A @ B bool ScaleOperandA_, /// Max GEMM problem size in K dimension int MaxK, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Transformation applied to B operand typename TransformB_ = NumericArrayConverter< typename SmemIteratorB_::Element, typename IteratorB_::Element, IteratorB_::Fragment::kElements>, /// Used for partial specialization typename Enable = bool> class MmaPipelinedFromSharedMemory : public MmaBaseFromSharedMemory< Shape_, MaxK, Policy_, 2, typename WarpIteratorA_::Layout> { public: ///< Base class using Base = MmaBaseFromSharedMemory< Shape_, MaxK, Policy_, 2, typename WarpIteratorA_::Layout>; using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<> static constexpr bool ScaleOperandA = ScaleOperandA_; using WarpIteratorA = WarpIteratorA_; ///< loads fragments of A_scale from shared memory if operand A scaling is ///< enabled. otherwise no-op. using WarpIteratorAScale = typename cutlass::platform::conditional< ScaleOperandA, WarpIteratorA, NoOpWarpIteratorScale<typename WarpIteratorA::TensorRef>>::type; using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory using ElementC = ElementC_; ///< Data type of accumulator matrix using LayoutC = LayoutC_; ///< Layout of accumulator matrix using Policy = Policy_; ///< Policy describing tuning details using SmemIteratorB = SmemIteratorB_; using TransformB = TransformB_; // // Dependent types // /// Fragment of operand B loaded from global memory using FragmentB = typename IteratorB::Fragment; /// Fragment of accumulator tile using FragmentC = typename Policy::Operator::FragmentC; /// Warp-level Mma using Operator = typename Policy::Operator; /// Obtain the arch tag from the warp-level operator using ArchTag = typename Policy::Operator::ArchTag; /// Complex transform on B operand static ComplexTransform const kTransformB = Operator::kTransformB; // staticaly assert kStages for MmaPipelined is two (Double-buffered pipeline) static_assert( (Base::kStages == 2), "MmaPipelined requires kStages set to value 2"); private: using WarpFragmentA = typename Operator::FragmentA; /// fragment type of OperandA elementwise scaling matrix. (almost) empty /// if operand A scaling is disabled. using WarpFragmentAScale = typename WarpIteratorAScale::Fragment; using WarpFragmentB = typename Operator::FragmentB; /// applies scaling factor to operand A fragment if operand A scaling is /// enabled. otherwise no-op. 
using FragmentAScaler = FragmentElementwiseScaler< WarpFragmentA, WarpFragmentAScale, ScaleOperandA>; protected: // /// Iterator to write threadblock-scoped tile of A operand to shared memory // SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB smem_iterator_B_; /// Iterator to load a warp-scoped tile of A operand from intermediate /// accumulator tile WarpIteratorA warp_tile_iterator_A_; /// Iterator to load a warp-scoped tile of A_scale from intermediate /// accumulator tile (only used if ScaleOperandA_ is true) WarpIteratorAScale warp_tile_iterator_A_scale_; public: /// constructor for MMA with operand A scaling enabled. CUTLASS_DEVICE MmaPipelinedFromSharedMemory( typename Base::TensorRefA a, // Operand A in shared memory typename Base::TensorRefA a_scale, // Operand A_scale in shared memory typename Base::TensorRefB b_staging, // staging memory for loading tiles of B int thread_idx, int warp_idx, int lane_idx) : Base(b_staging, thread_idx, warp_idx, lane_idx), warp_tile_iterator_A_(a, lane_idx), warp_tile_iterator_A_scale_(a_scale, lane_idx), smem_iterator_B_(b_staging, thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_A_scale_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); } /// Construct from tensor references CUTLASS_DEVICE MmaPipelinedFromSharedMemory( typename Base::TensorRefA a, ///< Operand A in shared memory typename Base::TensorRefB b_staging, ///< staging memory for loading B int thread_idx, ///< ID within the threadblock int warp_idx, ///< ID of warp int lane_idx) ///< ID of each thread within a warp : Base(b_staging, thread_idx, warp_idx, lane_idx), warp_tile_iterator_A_(a, lane_idx), smem_iterator_B_(b_staging, thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); } // For API compatibility with MmaMultistageFromSharedMemory // but not supported as it worsens perf: older GPUs < sm80 don't // support async transfers and have
to waste registers CUTLASS_DEVICE void set_prologue_done(bool value) {} CUTLASS_DEVICE static void prologue( typename Base::SharedStorage& shared_storage, IteratorB iterator_B1, int thread_idx, int problem_size_0_n) {} CUTLASS_DEVICE static void drain_cp_asyncs() {} /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( int gemm_k_iterations, ///< number of iterations of the mainloop FragmentC& accum, ///< destination accumulator tile // IteratorA iterator_A, ///< iterator over A // operand in global memory IteratorB iterator_B, ///< iterator over B operand in global memory FragmentC const& src_accum, ///< source accumulator tile // TransformA transform_A = TransformA(), ///< transformation // applied to A fragment TransformB transform_B = TransformB()) { ///< transformation applied to B fragment // // Prologue // // Perform accumulation in the 'd' output operand accum = src_accum; FragmentB tb_frag_B; tb_frag_B.clear(); // The last kblock is loaded in the prolog iterator_B.set_residual_tile(gemm_k_iterations == 1); iterator_B.load(tb_frag_B); ++iterator_B; this->smem_iterator_B_.store(transform_B(tb_frag_B)); ++this->smem_iterator_B_; __syncthreads(); // remember that WarpFragmentAScale and WarpIteratorAScale are empty/no-op // if scaling is disabled. // Pair of fragments used to overlap shared memory loads and math // instructions WarpFragmentA warp_frag_A[2]; WarpFragmentAScale warp_frag_A_scale[2]; WarpFragmentB warp_frag_B[2]; warp_frag_A[0].clear(); warp_frag_A_scale[0].clear(); warp_frag_B[0].clear(); this->warp_tile_iterator_B_.set_kgroup_index(0); this->warp_tile_iterator_A_.load(warp_frag_A[0]); this->warp_tile_iterator_A_scale_.load(warp_frag_A_scale[0]); this->warp_tile_iterator_B_.load(warp_frag_B[0]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_A_scale_; ++this->warp_tile_iterator_B_; Operator warp_mma; int smem_write_stage_idx = 1; // Avoid reading out of bounds iterator_B.set_residual_tile(gemm_k_iterations == 2); iterator_B.clear_mask(gemm_k_iterations <= 1); // Issue loads during the first warp-level matrix multiply-add *AFTER* // issuing shared memory loads (which have the tightest latency // requirement). // // Mainloop // // Note: The main loop does not support Base::kWarpGemmIterations == 2. CUTLASS_GEMM_LOOP for (; gemm_k_iterations > 0; --gemm_k_iterations) { // // Loop over GEMM K dimension // CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. 
bool hasNext = true; if (warp_mma_k == Base::kWarpGemmIterations - 1) { if (gemm_k_iterations > 1) { // Write fragments to shared memory this->smem_iterator_B_.store(transform_B(tb_frag_B)); } __syncthreads(); ++this->smem_iterator_B_; // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory SMEM: Don't reset iterator A, as // we are continuing our iteration at this point if (smem_write_stage_idx == 1) { this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); } else { this->warp_tile_iterator_B_.add_tile_offset( {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); } smem_write_stage_idx ^= 1; hasNext = gemm_k_iterations > 1; } // Only read the next if we need to if (hasNext) { this->warp_tile_iterator_B_.set_kgroup_index( (warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_A_scale_.load( warp_frag_A_scale[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_A_scale_; ++this->warp_tile_iterator_B_; if (warp_mma_k == 0) { iterator_B.load(tb_frag_B); ++iterator_B; // Avoid reading out of bounds if this was the last loop iteration iterator_B.set_residual_tile(gemm_k_iterations == 3); iterator_B.clear_mask(gemm_k_iterations <= 2); } } warp_mma( accum, FragmentAScaler::apply( warp_frag_A[warp_mma_k % 2], warp_frag_A_scale[warp_mma_k % 2]), warp_frag_B[warp_mma_k % 2], accum); } } } }; //////////////////////////////////////////////////////////////////////////////// // Taken from // https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_multistage_smem_accumulator.h //////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape1_, /// Iterates over the intermediate accumulator tile in shared memory typename WarpIteratorA1_, /// whether or not to perform elementwise multiplication of A // by another matrix (A_scale) that is also kept in shared memory prior // to matmul A @ B bool ScaleOperandA_, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB1_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB1_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB1, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy1_, /// Number of stages, int Stages_, int kMaxK_, /// Used for partial specialization typename Enable = bool> class MmaMultistageFromSharedMemory : public MmaBaseFromSharedMemory< Shape1_, kMaxK_, Policy1_, Stages_, typename WarpIteratorA1_::Layout> { public: ///< Base class using Base = MmaBaseFromSharedMemory< Shape1_, kMaxK_, Policy1_, Stages_, typename WarpIteratorA1_::Layout>; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape1 = Shape1_; ///< Iterates over tiles of B operand in global memory using IteratorB1 = IteratorB1_; using IteratorB = IteratorB1; ///< Policy describing tuning details using Policy1 = Policy1_; using SmemIteratorB1 = SmemIteratorB1_; using WarpIteratorA1 = WarpIteratorA1_; ///< Iterates over the intermediate ///< accumulator tile in shared memory static constexpr bool ScaleOperandA = ScaleOperandA_; ///< warp level iterator over A_scale matrix tile kept in shared memory. ///< if elementwise A scaling is disabled then everything this does is no-op. using WarpIteratorAScale = typename cutlass::platform::conditional< ScaleOperandA, WarpIteratorA1, NoOpWarpIteratorScale<typename WarpIteratorA1::TensorRef>>::type; ///< Data type of accumulator matrix using ElementC = ElementC_; ///< Layout of accumulator matrix using LayoutC = LayoutC_; static cutlass::arch::CacheOperation::Kind const kCacheOpB1 = CacheOpB1; static constexpr bool kSmemContainsEntireB = Base::kSmemContainsEntireB; // // Dependent types // /// Fragment of accumulator tile using FragmentC1 = typename Policy1::Operator::FragmentC; using FragmentC = FragmentC1; /// Warp-level Mma using Operator1 = typename Policy1::Operator; /// Minimum architecture is Sm80 to support cp.async using ArchTag = arch::Sm80; /// Complex transform on B operand static ComplexTransform const kTransformB1 = Operator1::kTransformB; /// Internal structure exposed for introspection. struct Detail { static_assert( Base::kWarpGemmIterations1 > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); /// Number of cp.async instructions to load one stage of operand B static int const TBLoadIterationsB1 = IteratorB1::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB1 = (TBLoadIterationsB1 + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1; }; static constexpr int kNumStagesConcurrentLoad = kSmemContainsEntireB ? Base::kStages : Base::kStages - 1; private: using WarpLoadedFragmentA1 = typename Operator1::FragmentA; /// fragment of OperandA scale matrix. 
if operand A scaling is disabled this /// is (almost) empty. using WarpLoadedFragmentA1Scale = typename WarpIteratorAScale::Fragment; using WarpLoadedFragmentB1 = typename Operator1::FragmentB; using WarpTransformedFragmentA1 = typename Operator1::TransformedFragmentA; using WarpTransformedFragmentB1 = typename Operator1::TransformedFragmentB; /// applies elementwise scaling to fragment of A. if operand A scaling is /// disabled this is a no-op. using FragmentAScaler = FragmentElementwiseScaler< WarpLoadedFragmentA1, WarpLoadedFragmentA1Scale, ScaleOperandA>; private: // // Data members // /// Iterator to load a warp-scoped tile of A1 operand from intermediate /// accumulator tile WarpIteratorA1 warp_tile_iterator_A1_; /// Iterator to load a warp-scoped tile of A1_scale operand from shared memory /// if operand A scaling is disabled everything this does is a no-op. WarpIteratorAScale warp_tile_iterator_A1_scale_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB1 smem_iterator_B1_; bool prologue_done_; public: /// constructor for MMA with operand A scaling enabled. CUTLASS_DEVICE MmaMultistageFromSharedMemory( typename Base::TensorRefA a, typename Base::TensorRefA a_scale, typename Base::TensorRefB b_tile, int thread_idx, int warp_idx, int lane_idx) : Base(b_tile, thread_idx, warp_idx, lane_idx), warp_tile_iterator_A1_(a, lane_idx), warp_tile_iterator_A1_scale_(a_scale, lane_idx), smem_iterator_B1_(b_tile, thread_idx), prologue_done_(false) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn_1 = warp_idx % (Base::WarpCount1::kM * Base::WarpCount1::kN); int warp_idx_k_1 = warp_idx / (Base::WarpCount1::kM * Base::WarpCount1::kN); int warp_idx_m_1 = warp_idx_mn_1 % Base::WarpCount1::kM; int warp_idx_n_1 = warp_idx_mn_1 / Base::WarpCount1::kM; // Add per-warp offsets in units of warp-level tiles warp_tile_iterator_A1_.add_tile_offset( {warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1}); warp_tile_iterator_A1_scale_.add_tile_offset( {warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations1 * warp_idx_k_1, warp_idx_n_1}); } /// Construct from tensor references CUTLASS_DEVICE MmaMultistageFromSharedMemory( typename Base::TensorRefA a, typename Base::TensorRefB b_tile, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : Base(b_tile, thread_idx, warp_idx, lane_idx), warp_tile_iterator_A1_(a, lane_idx), smem_iterator_B1_(b_tile, thread_idx), prologue_done_(false) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn_1 = warp_idx % (Base::WarpCount1::kM * Base::WarpCount1::kN); int warp_idx_k_1 = warp_idx / (Base::WarpCount1::kM * Base::WarpCount1::kN); int warp_idx_m_1 = warp_idx_mn_1 % Base::WarpCount1::kM; int warp_idx_n_1 = warp_idx_mn_1 / Base::WarpCount1::kM; // Add per-warp offsets in units of warp-level tiles warp_tile_iterator_A1_.add_tile_offset( 
{warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations1 * warp_idx_k_1, warp_idx_n_1}); } CUTLASS_DEVICE void set_prologue_done(bool value) { prologue_done_ = value; } CUTLASS_DEVICE static void prologue( typename Base::SharedStorage& shared_storage, IteratorB iterator_B1, int thread_idx, int problem_size_0_n) { SmemIteratorB1 smem_iterator_B1(shared_storage.operand_B_ref(), thread_idx); _prologue( iterator_B1, (problem_size_0_n + Base::Shape::kK - 1) / Base::Shape::kK, smem_iterator_B1); } CUTLASS_DEVICE static void drain_cp_asyncs() { // commit and drain all pending and predicated cp.async pnz from the GEMM // mainloop cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } CUTLASS_DEVICE void copy_tiles_and_advance_1( IteratorB1& iterator_B1, int group_start_B1 = 0) { iterator_B1.set_iteration_index( group_start_B1 * IteratorB1::kAccessesPerVector); this->smem_iterator_B1_.set_iteration_index(group_start_B1); // Load for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB1; ++j) { if (group_start_B1 + j < Detail::TBLoadIterationsB1) { typename IteratorB1::AccessType* dst_ptr = reinterpret_cast<typename IteratorB1::AccessType*>( this->smem_iterator_B1_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value * IteratorB1::ThreadMap::kElementsPerAccess / IteratorB1::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_B1.get(); cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>( dst_ptr + v, gmem_ptr, iterator_B1.valid()); ++iterator_B1; } ++this->smem_iterator_B1_; } } } CUTLASS_DEVICE static void _prologue( IteratorB& iterator_B1, int32_t gemm_k_iterations_1, SmemIteratorB1& smem_iterator_B1_) { // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < kNumStagesConcurrentLoad; ++stage, --gemm_k_iterations_1) { iterator_B1.set_residual_tile(gemm_k_iterations_1 == 1); iterator_B1.clear_mask(gemm_k_iterations_1 == 0); iterator_B1.set_iteration_index(0); smem_iterator_B1_.set_iteration_index(0); // Load for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) { typename IteratorB1::AccessType* dst_ptr = reinterpret_cast<typename IteratorB1::AccessType*>( smem_iterator_B1_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value * IteratorB1::ThreadMap::kElementsPerAccess / IteratorB1::kAccessesPerVector / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>( dst_ptr + v, iterator_B1.get(), iterator_B1.valid()); ++iterator_B1; } ++smem_iterator_B1_; } // Move to the next stage iterator_B1.add_tile_offset({1, 0}); smem_iterator_B1_.add_tile_offset({1, 0}); // Defines the boundary of a stage of cp.async. 
cutlass::arch::cp_async_fence(); } iterator_B1.set_residual_tile(gemm_k_iterations_1 == 1); iterator_B1.clear_mask(gemm_k_iterations_1 == 0); } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations_1_, ///< destination accumulator tile FragmentC1& accum, ///< iterator over B1 operand in global memory IteratorB1 iterator_B1, ///< initial value of accumulator FragmentC1 const& src_accum) { // 2nd Gemm // // Prologue // // Perform accumulation in the 'd' output operand accum = src_accum; if (!prologue_done_) { _prologue(iterator_B1, gemm_k_iterations_1_, smem_iterator_B1_); } else if (!kSmemContainsEntireB) { // Restore the iterators increments int gemm_k_iterations_1 = gemm_k_iterations_1_; // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < kNumStagesConcurrentLoad; ++stage, --gemm_k_iterations_1) { iterator_B1.set_iteration_index(0); this->smem_iterator_B1_.set_iteration_index(0); // Load for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) { CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) { ++iterator_B1; } ++this->smem_iterator_B1_; } iterator_B1.add_tile_offset({1, 0}); this->smem_iterator_B1_.add_tile_offset({1, 0}); } iterator_B1.set_residual_tile(gemm_k_iterations_1 <= 1); iterator_B1.clear_mask(gemm_k_iterations_1 <= 0); } // DEPBAR+SYNC cutlass::arch::cp_async_wait<kNumStagesConcurrentLoad - 1>(); __syncthreads(); // remember that WarpFragmentAScale and WarpIteratorAScale are no-op/empty // if scaling is disabled. // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA1 warp_loaded_frag_A1[2]; WarpLoadedFragmentA1Scale warp_loaded_frag_A1_scale[2]; WarpLoadedFragmentB1 warp_loaded_frag_B1[2]; WarpTransformedFragmentA1 warp_transformed_frag_A1[2]; WarpTransformedFragmentB1 warp_transformed_frag_B1[2]; Operator1 warp_mma1; warp_tile_iterator_A1_.load(warp_loaded_frag_A1[0]); ++warp_tile_iterator_A1_; warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[0]); ++warp_tile_iterator_A1_scale_; this->warp_tile_iterator_B_.set_kgroup_index(0); this->warp_tile_iterator_B_.load(warp_loaded_frag_B1[0]); ++this->warp_tile_iterator_B_; int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma1.transform( warp_transformed_frag_A1[0], warp_transformed_frag_B1[0], FragmentAScaler::apply( warp_loaded_frag_A1[0], warp_loaded_frag_A1_scale[0]), warp_loaded_frag_B1[0]); // tf32x3 kernels use staging accumulation. warp_mma uses a temporary // accumulator and this temporary accumulator is added to the final // accumulator once in every mainloop iteration. 
plus<FragmentC1> plus_accum; FragmentC1 tmp_accum; if (platform::is_same< typename Operator1::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same< typename Operator1::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { tmp_accum.clear(); } // // Mainloop // CUTLASS_PRAGMA_UNROLL for (int gemm_k_iterations_1 = gemm_k_iterations_1_ - (Base::kStages - 1); gemm_k_iterations_1 > (-Base::kStages + 1); gemm_k_iterations_1--) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) { // Load warp-level tile from accumulator fragment (A) // or shared memory (operand B) this->warp_tile_iterator_B_.set_kgroup_index( (warp_mma_k + 1) % Base::kWarpGemmIterations1); // skip warp tile loading for the last kgroup (we are out of the buf) if (gemm_k_iterations_1 > (-Base::kStages + 2) || warp_mma_k < Base::kWarpGemmIterations1 - 1) { warp_tile_iterator_A1_.load( warp_loaded_frag_A1[(warp_mma_k + 1) % 2]); warp_tile_iterator_A1_scale_.load( warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B_.load( warp_loaded_frag_B1[(warp_mma_k + 1) % 2]); } ++warp_tile_iterator_A1_; ++warp_tile_iterator_A1_scale_; ++this->warp_tile_iterator_B_; if (warp_mma_k > 0) warp_mma1.transform( warp_transformed_frag_A1[warp_mma_k % 2], warp_transformed_frag_B1[warp_mma_k % 2], FragmentAScaler::apply( warp_loaded_frag_A1[warp_mma_k % 2], warp_loaded_frag_A1_scale[warp_mma_k % 2]), warp_loaded_frag_B1[warp_mma_k % 2]); if (platform::is_same< typename Operator1::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same< typename Operator1::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { warp_mma1( tmp_accum, warp_transformed_frag_A1[warp_mma_k % 2], warp_transformed_frag_B1[warp_mma_k % 2], tmp_accum); if (warp_mma_k == 0) { accum = plus_accum(accum, tmp_accum); tmp_accum.clear(); } } else { warp_mma1( accum, warp_transformed_frag_A1[warp_mma_k % 2], warp_transformed_frag_B1[warp_mma_k % 2], accum); } // Issue global->shared copies for the this stage if (warp_mma_k < Base::kWarpGemmIterations1 - 1) { int group_start_iteration_B1; group_start_iteration_B1 = warp_mma_k * Detail::kAccessesPerGroupB1; if (!kSmemContainsEntireB) { copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1); } } if (warp_mma_k + 2 == Base::kWarpGemmIterations1) { int group_start_iteration_B1; group_start_iteration_B1 = (warp_mma_k + 1) * Detail::kAccessesPerGroupB1; if (!kSmemContainsEntireB) { copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1); } // Inserts a memory fence between stages of cp.async instructions. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages have committed. 
arch::cp_async_wait<kNumStagesConcurrentLoad - 1>(); __syncthreads(); // Move to the next stage iterator_B1.add_tile_offset({1, 0}); this->smem_iterator_B1_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (!kSmemContainsEntireB) { if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_B_.add_tile_offset( {-Base::kStages * Policy1::kPartitionsK * Base::kWarpGemmIterations1, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } } iterator_B1.set_residual_tile(gemm_k_iterations_1 == 2); iterator_B1.clear_mask(gemm_k_iterations_1 == 1); } // Do any conversions feeding the first stage at the end of the loop so // we can start right away on mma instructions if (warp_mma_k + 1 == Base::kWarpGemmIterations1) warp_mma1.transform( warp_transformed_frag_A1[(warp_mma_k + 1) % 2], warp_transformed_frag_B1[(warp_mma_k + 1) % 2], FragmentAScaler::apply( warp_loaded_frag_A1[(warp_mma_k + 1) % 2], warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]), warp_loaded_frag_B1[(warp_mma_k + 1) % 2]); } } if (platform::is_same< typename Operator1::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same< typename Operator1::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { accum = plus_accum(accum, tmp_accum); } } }; // Converts a "regular" Mma into their counterpart from shared memory template < typename Mma_, int kMaxK, typename WarpIteratorA_, /// whether or not to apply elementwise multiplication of operand A by /// another matrix in shared memory before usage in A @ B bool kScaleOperandA, bool kTransposeA = false> struct DefaultMmaFromSharedMemory; // Mma pipelined template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, typename WarpIteratorA_, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Transformation applied to A operand typename TransformA_, /// Transformation applied to B operand typename TransformB_, // Max MMA problem size K int kMaxK, /// whether or not to apply elementwise multiplication of operand A by /// another matrix in shared memory before usage in A @ B bool kScaleOperandA, bool kTransposeA> struct DefaultMmaFromSharedMemory< MmaPipelined< Shape_, IteratorA_, SmemIteratorA_, IteratorB_, SmemIteratorB_, ElementC_, LayoutC_, Policy_, TransformA_, TransformB_>, kMaxK, WarpIteratorA_, kScaleOperandA, kTransposeA> { using RegularMma = MmaPipelined< Shape_, IteratorA_, SmemIteratorA_, IteratorB_, SmemIteratorB_, ElementC_, LayoutC_, Policy_, TransformA_, TransformB_>; using WarpShape = typename Policy_::Operator::Shape; using InstructionShape 
= typename Policy_::Operator::InstructionShape; using ArchMmaOperator = typename Policy_::Operator; static constexpr bool kIsTransposedA = false; using WarpIteratorA = WarpIteratorA_; using IteratorB = typename cutlass::transform::threadblock::MakeIteratorResidualLast< IteratorB_>::Iterator; using Mma = typename cutlass::gemm::threadblock::MmaPipelinedFromSharedMemory< Shape_, WarpIteratorA, kScaleOperandA, kMaxK, IteratorB, SmemIteratorB_, ElementC_, LayoutC_, Policy_>; }; template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, typename WarpIteratorA_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// Number of stages, int Stages, /// Use zfill or predicate for out-of-bound cp.async SharedMemoryClearOption SharedMemoryClear, int kMaxK, /// whether or not to apply elementwise multiplication of operand A by /// another matrix in shared memory before usage in A @ B bool kScaleOperandA, bool kTransposeA> struct DefaultMmaFromSharedMemory< MmaMultistage< Shape_, IteratorA_, SmemIteratorA_, CacheOpA, IteratorB_, SmemIteratorB_, CacheOpB, ElementC_, LayoutC_, Policy_, Stages, SharedMemoryClear>, kMaxK, WarpIteratorA_, kScaleOperandA, kTransposeA> { using RegularMma = MmaMultistage< Shape_, IteratorA_, SmemIteratorA_, CacheOpA, IteratorB_, SmemIteratorB_, CacheOpB, ElementC_, LayoutC_, Policy_, Stages, SharedMemoryClear>; using WarpShape = typename Policy_::Operator::Shape; using InstructionShape = typename Policy_::Operator::InstructionShape; using WarpIteratorTranspose = TransposeWarpIterator<WarpIteratorA_>; static constexpr bool kIsTransposedA = WarpIteratorTranspose::kSupportsTranspose && kTransposeA; using WarpIteratorA = typename platform::conditional< kIsTransposedA, typename WarpIteratorTranspose::Iterator, WarpIteratorA_>::type; // Reduce the number of stages if we don't need that many static int constexpr kStagesMax = (kMaxK + int(Shape_::kK) - 1) / int(Shape_::kK); static int constexpr kStages = cutlass::const_min(Stages, kStagesMax); using IteratorB = typename cutlass::transform::threadblock::MakeIteratorResidualLast< IteratorB_>::Iterator; using Mma = typename cutlass::gemm::threadblock::MmaMultistageFromSharedMemory< Shape_, WarpIteratorA, kScaleOperandA, IteratorB, SmemIteratorB_, RegularMma::kCacheOpB, ElementC_, LayoutC_, Policy_, kStages, kMaxK>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename IteratorC, typename Operator, typename scalar_t, typename WarpShape_, typename ThreadblockShape_> struct B2bGemm; // Tensor Cores >= Sm75 specialization (Ampere ...) 
template < /// Size of the matrix to load (concept: MatrixShape) typename Shape_, /// Element type typename Element_, /// Layout of operand in memory typename Layout_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, /// Interval between adjacent *MMA instructions (in units of MMA /// instructions, concept: MatrixShape) typename OpDelta_, typename Operator, typename scalar_t, typename WarpShape_, typename ThreadblockShape_> struct B2bGemm< cutlass::gemm::warp::MmaTensorOpAccumulatorTileIterator< Shape_, Element_, Layout_, InstructionShape_, OpDelta_>, Operator, scalar_t, WarpShape_, ThreadblockShape_> { using IteratorC = typename cutlass::gemm::warp::MmaTensorOpAccumulatorTileIterator< Shape_, Element_, Layout_, InstructionShape_, OpDelta_>; using FragmentC = typename IteratorC::Fragment; using InstructionShape = InstructionShape_; using WarpShape = WarpShape_; using ThreadblockShape = ThreadblockShape_; using accum_t = Element_; using lse_scalar_t = float; using SmemAccumulatorLayout = cutlass::layout::RowMajor; // Iterator to load accumulators (results of matmul in registers) using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp< WarpShape, InstructionShape, accum_t, typename Operator::Policy::Operator::FragmentC, cutlass::layout::RowMajor>; // Iterator to store to shared-memory using SmemIteratorD0 = typename cutlass::epilogue::warp::TileIteratorTensorOp< WarpShape, InstructionShape, scalar_t, // accum_t, SmemAccumulatorLayout>; using AccumulatorSharedStorage = cutlass::gemm::threadblock::AccumulatorSharedStorage< ThreadblockShape, typename SmemIteratorD0::Element, typename SmemIteratorD0::TensorLayout, typename SmemIteratorD0::Padding>; // We need to provide an operation for the epilogue. Let's create an // operation that does nothing (ScaleType::Nothing), just converts // from accum_t (float) -> scalar_t (can be half) using OutputOpNoOp = cutlass::epilogue::thread::LinearCombination< typename SmemIteratorD0::Element, // ElementOutput FragmentIteratorAccumulator::Fragment::kElements, accum_t, // ElementAccumulator typename SmemIteratorD0::Element, // ElementCompute cutlass::epilogue::thread::ScaleType::Nothing>; using Epilogue = cutlass::epilogue::threadblock::EpilogueSmemAccumulator< SmemIteratorD0, FragmentIteratorAccumulator, SmemIteratorD0, // ScaleBiasIterator - not used OutputOpNoOp>; // Epilogue 2: with LSE (for backwards pass) static int const kElementsPerAccess = 2; // TODO: Why 2? 
using IteratorAccumulatorLSE = cutlass::transform::threadblock::VectorIterator< cutlass::transform::threadblock::PredicatedVectorAccessIterator< // Shape cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kN>, // WarpShape cutlass::MatrixShape<WarpShape::kM, WarpShape::kN>, lse_scalar_t, cutlass::layout::RowMajor, kElementsPerAccess>>; using EpilogueOpApplyLSE = cutlass::epilogue::thread::ApplyLogSumExp< scalar_t, // ElementOutput_ lse_scalar_t, // ElementLSE_ accum_t, // ElementAccumulator_ accum_t, // ElementCompute_ 128 / cutlass::sizeof_bits<scalar_t>::value // FragmentIteratorAccumulator::Fragment::kElements // InstructionShape::kM * InstructionShape::kN / 32 >; using EpilogueWithLSE = cutlass::epilogue::threadblock::EpilogueSmemAccumulator< SmemIteratorD0, FragmentIteratorAccumulator, IteratorAccumulatorLSE, EpilogueOpApplyLSE>; static void CUTLASS_DEVICE accumToSmem( AccumulatorSharedStorage& shared_storage, FragmentC const& accum, int lane_id, cutlass::MatrixCoord const& tile_coords) { SmemIteratorD0 smem_iterator_attn(shared_storage.accum_ref(), lane_id); smem_iterator_attn.add_tile_offset( tile_coords * cutlass::MatrixCoord{ SmemIteratorD0::TileIterations::kRow, SmemIteratorD0::TileIterations::kColumn}); Epilogue epilogue; epilogue(OutputOpNoOp({}), smem_iterator_attn, accum); } static void CUTLASS_DEVICE accumApplyLSEToSmem( AccumulatorSharedStorage& shared_storage, FragmentC& accum, lse_scalar_t const* lse, int32_t lse_extents, int thread_id, int warp_id, int lane_id, cutlass::MatrixCoord const& tile_coords) { constexpr int32_t kAlignLSE = 32; IteratorAccumulatorLSE iterator_lse( lse, {(int32_t)0, (int32_t)ceil_div(lse_extents, kAlignLSE) * kAlignLSE}, thread_id, warp_id, cutlass::MatrixCoord{0, 0} // offset ); SmemIteratorD0 smem_iterator_attn(shared_storage.accum_ref(), lane_id); smem_iterator_attn.add_tile_offset( tile_coords * cutlass::MatrixCoord{ SmemIteratorD0::TileIterations::kRow, SmemIteratorD0::TileIterations::kColumn}); EpilogueWithLSE epilogue; EpilogueOpApplyLSE minus_lse_exp({}); epilogue( minus_lse_exp, smem_iterator_attn, accum, // scale - unused iterator_lse, // bias iterator_lse); } }; // Volta Specialization // only supported for f16 template <typename Operator, typename WarpShape_, typename ThreadblockShape_> struct B2bGemm< cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator< cutlass::MatrixShape<32, 32>, float, cutlass::layout::RowMajor, cutlass::gemm::GemmShape<16, 16, 4>, cutlass::MatrixShape<1, 1>>, Operator, cutlass::half_t, WarpShape_, ThreadblockShape_> { using IteratorC = cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator< cutlass::MatrixShape<32, 32>, float, cutlass::layout::RowMajor, cutlass::gemm::GemmShape<16, 16, 4>, cutlass::MatrixShape<1, 1>>; using scalar_t = cutlass::half_t; using accum_t = IteratorC::Element; using WarpShape = WarpShape_; using ThreadblockShape = ThreadblockShape_; using FragmentC = IteratorC::Fragment; using lse_scalar_t = float; // Storage in shared-memory for Q.Kt using SmemAccumulatorLayout = cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<16, 32>; using AccumulatorSharedStorage = cutlass::gemm::threadblock::AccumulatorSharedStorage< ThreadblockShape, scalar_t, SmemAccumulatorLayout, cutlass::MatrixShape<0, 0> // Padding >; using TensorRef = cutlass::TensorRef<scalar_t, SmemAccumulatorLayout>; using Policy = typename IteratorC::Policy; using Element = accum_t; // Those are MmaVoltaTensorOpAccumulatorTileIterator private fields // Let's copy their values static int const 
kElementsPerPartial = 4; using EleShapePerPatial = typename cutlass::platform::conditional< cutlass::platform::is_same<Element, float>::value, cutlass::MatrixShape<2, 2>, cutlass::MatrixShape<1, 4>>::type; static int const kElementsPerMma = 8; static int const kAccumulatorPatials = 2; using QuadShapePerPatialMma = cutlass::MatrixShape<4, 4>; static void CUTLASS_DEVICE accumToSmem( AccumulatorSharedStorage& shared_storage, FragmentC const& accum, int lane_id, cutlass::MatrixCoord const& tile_coords) { // ctor - from MmaVoltaTensorOpAccumulatorTileIterator TensorRef ref_(shared_storage.accum_ref()); int quad = (lane_id >> 2); int lane_in_quad = (lane_id & 3); int accum_m, accum_n; if (cutlass::platform::is_same<Element, float>::value) { // (quad[2],quad[0])+lane_in_quad[0] accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1); // (quad[1])+lane_in_quad[1] accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials + (lane_in_quad & 2); } else { accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + lane_in_quad; // (quad[2],quad[0]) accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials; } cutlass::MatrixCoord lane_offset(accum_m, accum_n); // Tile offset ref_.add_coord_offset( tile_coords * cutlass::MatrixCoord( {IteratorC::Shape::kRow, IteratorC::Shape::kColumn})); using AccessType = cutlass::Array<scalar_t, EleShapePerPatial::kColumn>; // store - from MmaVoltaTensorOpAccumulatorTileIterator CUTLASS_PRAGMA_UNROLL for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) { CUTLASS_PRAGMA_UNROLL for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) { CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { int mma_accum_start = (((tile_n * Policy::TileIterations::kRow + tile_m) * Policy::MmaIterations::kColumn + mma_n) * Policy::MmaIterations::kRow + mma_m) * kElementsPerMma; CUTLASS_PRAGMA_UNROLL for (int p = 0; p < kAccumulatorPatials; ++p) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < EleShapePerPatial::kRow; ++m) { int accum_m = tile_m * Policy::InterleavedTile::kRow + mma_m * QuadShapePerPatialMma::kRow + m * 2; int accum_n = tile_n * Policy::InterleavedTile::kColumn + mma_n * QuadShapePerPatialMma::kColumn + p * Policy::InterleavedTile::kColumn / 2; int r = (accum_m + lane_offset.row()); AccessType to_store; CUTLASS_PRAGMA_UNROLL for (int n = 0; n < EleShapePerPatial::kColumn; ++n) { int idx = mma_accum_start + p * kElementsPerPartial + m * EleShapePerPatial::kColumn + n; int c = (accum_n + n + lane_offset.column()); to_store[n] = scalar_t(accum[idx]); } int c = (accum_n + lane_offset.column()); assert(r < 32); assert(c < 32); *reinterpret_cast<AccessType*>( ref_.data() + ref_.offset({r, c})) = to_store; } } } } } } } static void CUTLASS_DEVICE accumApplyLSEToSmem( AccumulatorSharedStorage& shared_storage, typename IteratorC::Fragment& accum, lse_scalar_t const* lse, int lse_extent, int thread_id, int warp_id, int lane_id, cutlass::MatrixCoord const& tile_coords) { // Non-optimized way to apply LSE to registers // NOTE: accum is attn.T // TODO: Optimize for each architecture static constexpr int WarpSize = 32; using AccumLambdaIterator = typename DefaultMmaAccumLambdaIterator<IteratorC, accum_t, WarpSize>:: Iterator; auto lane_offset = AccumLambdaIterator::get_lane_offset(lane_id, warp_id, tile_coords); cutlass::Array<lse_scalar_t, IteratorC::Fragment::kElements> lse_prefetched; 
lse_prefetched.clear(); int rowIdx = 0; int colIdx = 0; AccumLambdaIterator::iterateRows( lane_offset, [&](int accum_m) { ++rowIdx; colIdx = 0; }, [&](int accum_m, int accum_n, int idx) { if (rowIdx == 1) { lse_prefetched[colIdx] = accum_n < lse_extent ? lse[accum_n] : platform::numeric_limits<accum_t>::infinity(); } accum[idx] = expf(accum[idx] - lse_prefetched[colIdx]); ++colIdx; }, [&](int accum_m) {}); accumToSmem(shared_storage, accum, lane_id, tile_coords); } }; // Simt Specialization // for f32 on Sm70-Sm75 and f16/f32 below template < typename Operator, typename OperatorPolicy, typename scalar_t, typename WarpShape_, typename ThreadblockShape_> struct B2bGemm< cutlass::gemm::warp::MmaSimtTileIterator< cutlass::MatrixShape<32, 32>, cutlass::gemm::Operand::kC, float, cutlass::layout::RowMajor, OperatorPolicy, 1, 1>, Operator, scalar_t, WarpShape_, ThreadblockShape_> { using IteratorC = cutlass::gemm::warp::MmaSimtTileIterator< cutlass::MatrixShape<32, 32>, cutlass::gemm::Operand::kC, float, cutlass::layout::RowMajor, OperatorPolicy, 1, 1>; using accum_t = typename IteratorC::Element; using WarpShape = WarpShape_; using ThreadblockShape = ThreadblockShape_; using FragmentC = typename IteratorC::Fragment; using lse_scalar_t = float; // Storage in shared-memory for Q.Kt using AccumulatorSharedStorage = cutlass::gemm::threadblock::AccumulatorSharedStorage< ThreadblockShape, scalar_t, cutlass::layout::ColumnMajor, cutlass::MatrixShape<0, 0> // Padding >; static void CUTLASS_DEVICE accumToSmem( AccumulatorSharedStorage& shared_storage, FragmentC const& accum, int lane_id, cutlass::MatrixCoord const& tile_coords) { using Policy = typename IteratorC::Policy; using Element = typename IteratorC::Element; using Iterations = typename IteratorC::Iterations; using Delta = typename IteratorC::Delta; auto ref_ = shared_storage.accum_ref(); // ctor - MmaSimtTileIterator // compute offset based on thread ID and lane layout typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); MatrixCoord lane_offset = lane_layout.inverse(lane_id) * MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN); ref_.add_coord_offset(lane_offset); // Tile offset ref_.add_coord_offset( tile_coords * cutlass::MatrixCoord( {IteratorC::Shape::kRow, IteratorC::Shape::kColumn})); // store - MmaSimtTileIterator CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) { int r = Policy::LaneMmaShape::kM * (mma_m * Policy::WarpShape::kRow) + m; int c = mma_n * Delta::kColumn + n; int idx = n + Policy::LaneMmaShape::kN * (mma_n + Iterations::kColumn * (m + mma_m * Policy::LaneMmaShape::kM)); ref_.at({r, c}) = scalar_t(accum[idx]); } } } } } static void CUTLASS_DEVICE accumApplyLSEToSmem( AccumulatorSharedStorage& shared_storage, typename IteratorC::Fragment& accum, lse_scalar_t const* lse, int lse_extent, int thread_id, int warp_id, int lane_id, cutlass::MatrixCoord const& tile_coords) { // Non-optimized way to apply LSE to registers // NOTE: accum is attn.T // TODO: Optimize for each architecture static constexpr int WarpSize = 32; using AccumLambdaIterator = typename DefaultMmaAccumLambdaIterator<IteratorC, accum_t, WarpSize>:: Iterator; auto lane_offset = AccumLambdaIterator::get_lane_offset(lane_id, warp_id, tile_coords); cutlass::Array<lse_scalar_t, 
IteratorC::Fragment::kElements> lse_prefetched; lse_prefetched.clear(); int rowIdx = 0; int colIdx = 0; AccumLambdaIterator::iterateRows( lane_offset, [&](int accum_m) { ++rowIdx; colIdx = 0; }, [&](int accum_m, int accum_n, int idx) { if (rowIdx == 1) { lse_prefetched[colIdx] = accum_n < lse_extent ? lse[accum_n] : platform::numeric_limits<accum_t>::infinity(); } accum[idx] = expf(accum[idx] - lse_prefetched[colIdx]); ++colIdx; }, [&](int accum_m) {}); accumToSmem(shared_storage, accum, lane_id, tile_coords); } }; } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
examples/41_fused_multi_head_attention/gemm/mma_from_smem.h/0
{ "file_path": "examples/41_fused_multi_head_attention/gemm/mma_from_smem.h", "repo_id": "examples", "token_count": 28622 }
7
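The accumApplyLSEToSmem routines in mma_from_smem.h above normalize attention scores held in registers by applying a precomputed log-sum-exp, either through the ApplyLogSumExp epilogue (Tensor Op path) or directly as accum[idx] = expf(accum[idx] - lse_prefetched[colIdx]) (Volta and Simt paths), with positions past lse_extent receiving +infinity so they map to zero. The short host-side C++ sketch below is illustrative only, not CUTLASS device code; the names log_sum_exp, scores, and the sample values are made up for demonstration. It shows that the same transform, exp(x - lse) with a numerically stable lse, reproduces a softmax row and that an infinite lse masks an entry to zero.

// Host-side sketch of the LSE normalization used by accumApplyLSEToSmem.
// Illustrative only; all names and values below are assumptions.
#include <cmath>
#include <cstdio>
#include <vector>

// Numerically stable log-sum-exp of one row of scores.
float log_sum_exp(const std::vector<float>& row) {
  float m = row[0];
  for (float x : row) m = std::fmax(m, x);   // running maximum
  float s = 0.f;
  for (float x : row) s += std::exp(x - m);  // sum of shifted exponentials
  return m + std::log(s);
}

int main() {
  std::vector<float> scores = {1.0f, 2.0f, 0.5f, -1.0f};
  float lse = log_sum_exp(scores);

  float total = 0.f;
  for (float x : scores) {
    float p = std::exp(x - lse);             // same element-wise transform as the kernel
    total += p;
    std::printf("%f ", p);
  }
  std::printf("\nsum = %f\n", total);        // ~1.0: the row is a softmax distribution

  // Entries beyond lse_extent use lse = +infinity in the kernel, so they become 0.
  float masked = std::exp(3.0f - INFINITY);
  std::printf("masked = %f\n", masked);      // 0.0
  return 0;
}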
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Block-Ell sparse gemm example. This example performs a Sparse-matrix dense-matrix multiplication (SpMM) operation. Matrix A is stored in the Blocked-Ellpack (Blocked-ELL) storage format. Details about the Blocked-Ellpack (Blocked-ELL) storage format can be found here: https://docs.nvidia.com/cuda/cusparse/index.html#cusparse-generic-spmat-create-blockedell Whereas matrix B is a dense matrix. Blocked-Ellpack or Blocked-ELL storage format comprises of two matrices. First is a packed matrix (ellValue matrix) that stores non-zero values in consecutive blocks, represented by tensor_a in this example. Second is a matrix of indices (ellColInd matrix), represented by tensor_ell_idx in this example, that represent the column indices of the corresponding non-zero blocks. All rows in the matrices must have the same number of blocks. ellColInd can contain -1 values for indicating empty blocks. These matrices store elements in row-major order. Description of parameters and tensors used to represent the Blocked-Ellpack (ELL) format for this example: a_rows - Rows in the sparse matrix. a_cols - Colums in the sparse matrix. a_ell_blocksize - Size of the ELL-Blocks. 
a_ell_num_columns - Number of columns in the Blocked-Ellpack format (ellValue columns) tensor_a - ellValue matrix, whose size is (a_rows * a_ell_num_columns) tensor_ell_idx - Blocked-ELL Column indices (ellColInd), whose size is (a_rows / a_ell_blocksize) * (a_ell_num_columns / a_ell_blocksize) tensor_b - Input dense matrix whose size is (a_cols * n) tensor_c/tensor_d - Output dense matrix whose size is (a_rows * n) {a_rows, n, a_cols} - Problem size */ ///////////////////////////////////////////////////////////////////////////////////////////////// #include <iostream> #include <fstream> #include <sstream> #include <vector> #include <unordered_map> #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/gemm_grouped.h" #include "cutlass/gemm/kernel/default_gemm_grouped.h" #include "cutlass/gemm/device/ell_gemm.h" #include "cutlass/util/command_line.h" #include "cutlass/util/distribution.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/host_uncompress.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double runtime_ms; double gflops; cutlass::Status status; cudaError_t error; bool passed; // // Methods // Result( double runtime_ms = 0, double gflops = 0, cutlass::Status status = cutlass::Status::kSuccess, cudaError_t error = cudaSuccess ): runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; bool reference_check; int iterations; int cuda_streams; int a_rows, n, a_cols; int a_ell_num_columns; int a_ell_blocksize; int a_base; float alpha; float beta; // // Methods // Options(): help(false), reference_check(true), iterations(20), cuda_streams(0), a_rows(1024), n(1024), a_cols(1024), a_ell_num_columns(512), a_ell_blocksize(16), a_base(0), alpha(1), beta() { } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("alpha", alpha, 1.0f); cmd.get_cmd_line_argument("beta", beta, 0.0f); cmd.get_cmd_line_argument("iterations", iterations, 20); cmd.get_cmd_line_argument("streams", cuda_streams, 0); cmd.get_cmd_line_argument("reference-check", reference_check, true); cmd.get_cmd_line_argument("a_rows", a_rows, 1024); cmd.get_cmd_line_argument("n", n, 1024); cmd.get_cmd_line_argument("a_cols", a_cols, 1024); cmd.get_cmd_line_argument("a_ell_num_columns", a_ell_num_columns, 512); cmd.get_cmd_line_argument("a_ell_blocksize", a_ell_blocksize, 16); cmd.get_cmd_line_argument("a_base", a_base, 0); } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "43_ell_block_sparse_gemm\n\n" << " This example profiles the performance of a ELL block sparse GEMM kernel.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --a_rows=<int> Sets the number of the rows of the sparse matrix.\n" << " --n=<int> Sets the N dimension.\n" << " --a_cols=<int> Sets the number of columns of the sparse matrix.\n" << " --a_ell_num_columns=<int> Sets the actual number of columns of the Blocked-Ellpack format.\n" << " --a_ell_blocksize=<int> Sets the size of the ELL-Block.\n" << " --a_base=<int> Sets the base index.\n" << " --alpha=<f32> Epilogue scalar alpha (real part)\n" << " --beta=<f32> Epilogue scalar beta (real part)\n\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --reference-check=<bool> If true, performs reference check.\n"; out << "\n\nExamples:\n\n" << "# Runs a 1024x1024x1024 ELL block sparse GEMM with 16x16 block size and actual 512 non-zero columns in A operand\n" << "$ ./examples/43_ell_block_sparse_gemm/43_ell_block_sparse_gemm --a_rows=1024 --n=1024 --a_cols=1024 --a_ell_num_columns=512 --a_ell_blocksize=16\n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of real-valued multiply-adds int64_t fmas = (int64_t)a_rows * (int64_t)a_cols * (int64_t)n; // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Gemm> class Testbed { public: // // Type definitions // using ElementA = typename Gemm::ElementA; using ElementB = typename Gemm::ElementB; using ElementC = typename Gemm::ElementC; using ElementAccumulator = typename Gemm::ElementAccumulator; using EpilogueOutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp; using ElementCompute = typename EpilogueOutputOp::ElementCompute; using LayoutA = typename Gemm::LayoutA; using LayoutB = typename Gemm::LayoutB; using LayoutC = typename Gemm::LayoutC; using MatrixCoord = typename LayoutC::TensorCoord; private: // // Data members // Options options; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; cutlass::Distribution::Kind init_ELL; uint32_t seed; cutlass::HostTensor<ElementA, LayoutA> tensor_a; cutlass::HostTensor<ElementB, LayoutB> tensor_b; cutlass::HostTensor<ElementC, LayoutC> tensor_c; cutlass::HostTensor<ElementC, LayoutC> tensor_d; cutlass::HostTensor<ElementA, LayoutA> tensor_a_uncompressed; cutlass::HostTensor<ElementC, LayoutC> reference_d; cutlass::HostTensor<int32_t, LayoutA> tensor_ell_idx; public: // // Methods // Testbed( Options const &options_, cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_ELL_ = cutlass::Distribution::Uniform, uint32_t seed_ = 3080 ): options(options_), init_A(init_A_), init_B(init_B_), init_C(init_C_), init_ELL(init_ELL_), seed(seed_) { } private: /// Helper to initialize a tensor view template <typename Element, typename Layout> void initialize_tensor_( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint32_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { Element scope_max, scope_min; int bits_input = 
cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillRandomUniform( view, seed, scope_max, scope_min, 0); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian( view, seed, Element(), Element(0.5f)); } else if (dist_kind == cutlass::Distribution::Sequential) { // Fill with increasing elements cutlass::reference::host::BlockFillSequential( view.data(), view.capacity(), Element(1), Element()); } else { // Fill with all 1s cutlass::reference::host::BlockFillSequential( view.data(), view.capacity(), Element(), Element(1)); } } /// Initializes data structures void initialize_() { tensor_a.resize(cutlass::make_Coord(options.a_rows, options.a_ell_num_columns)); tensor_b.resize(cutlass::make_Coord(options.a_cols, options.n)); tensor_c.resize(cutlass::make_Coord(options.a_rows, options.n)); tensor_d.resize(cutlass::make_Coord(options.a_rows, options.n)); tensor_a_uncompressed.resize(cutlass::make_Coord(options.a_rows, options.a_cols)); reference_d.resize(cutlass::make_Coord(options.a_rows, options.n)); tensor_ell_idx.resize(cutlass::make_Coord(options.a_rows / options.a_ell_blocksize, options.a_ell_num_columns / options.a_ell_blocksize)); // // Initialize the problems of the workspace // initialize_tensor_(tensor_a.host_view(), init_A, seed * 2021); initialize_tensor_(tensor_b.host_view(), init_B, seed * 2022); initialize_tensor_(tensor_c.host_view(), init_C, seed * 2023); if (init_ELL == cutlass::Distribution::Uniform) { cutlass::reference::host::TensorFillRandomEllIdx( tensor_ell_idx.host_view(), seed, options.a_rows / options.a_ell_blocksize, options.a_ell_num_columns / options.a_ell_blocksize, options.a_cols / options.a_ell_blocksize); } else { for(int i = 0; i < options.a_rows / options.a_ell_blocksize; ++i) { for(int j = 0; j < options.a_ell_num_columns / options.a_ell_blocksize; ++j) { tensor_ell_idx.at({i, j}) = j+3; } } } tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ell_idx.sync_device(); } /// Verifies the result is a GEMM bool verify_() { bool passed = true; tensor_d.sync_host(); cutlass::uncompress_ell_block_sparse( tensor_a_uncompressed.host_ref(), tensor_a.host_ref(), tensor_ell_idx.host_ref(), options.a_rows, options.a_cols, options.a_ell_num_columns, options.a_ell_blocksize ); cutlass::reference::host::Gemm< typename Gemm::ElementA, typename Gemm::LayoutA, typename Gemm::ElementB, typename Gemm::LayoutB, typename Gemm::ElementC, typename Gemm::LayoutC, ElementCompute, ElementAccumulator, typename Gemm::Operator> reference_gemm; reference_gemm( {options.a_rows, options.n, options.a_cols}, options.alpha, tensor_a_uncompressed.host_ref(), tensor_b.host_ref(), options.beta, reference_d.host_ref(), ElementAccumulator(0) ); // Reference check passed = cutlass::reference::host::TensorEquals(tensor_d.host_view(), reference_d.host_view()); if (!passed) { std::cerr << "\n***\nError - problem failed the QA check\n***\n" << std::endl; std::stringstream fname; fname << "error_43_ell_block_sparse_gemm" << "mnk_" << options.a_rows << "x" << options.n << "x" << options.a_cols << 
"_" << options.a_ell_num_columns << "_" << options.a_ell_blocksize << ".txt"; std::cout << fname.str() << std::endl; std::ofstream results(fname.str()); results << "alpha: " << ElementCompute(options.alpha) << "\n" << "beta: " << ElementCompute(options.beta) << "\n" << "block size: " << options.a_ell_blocksize << "\n" << "\nA:\n" << tensor_a.host_view() << "\n" << "\nA Ell Index:\n" << tensor_ell_idx.host_view() << "\n" << "\nB:\n" << tensor_b.host_view() << "\n" << "\nC:\n" << tensor_c.host_view() << "\n" << "\nD reference:\n" << reference_d.host_view() << "\n" << "\nD computed:\n" << tensor_d.host_view() << "\n"; return passed; } return passed; } public: /// Returns the number of threadblocks to launch if the kernel can run on the target /// device. Otherwise, returns zero. bool sufficient() const { // // Determine SMEM requirements and waive if not satisfied // size_t smem_size = sizeof(typename Gemm::GemmKernel::SharedStorage); cudaDeviceProp properties; int device_idx; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDevice() API call failed."); } result = cudaGetDeviceProperties(&properties, device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProperties() failed"); } if (properties.sharedMemPerBlockOptin < smem_size) { return false; } return true; } /// Executes a BlockedEll SpMM kernel and measures runtime. Result profile() { Result result; // Early exit if (!sufficient()) { std::cout << "Active CUDA device lacks hardware resources to run CUTLASS BlockedEll SpMM kernel." << std::endl; return result; } result.passed = false; // Initialize the problem initialize_(); // Configure the GEMM arguments typename EpilogueOutputOp::Params epilogue_op(options.alpha, options.beta); // Configure GEMM arguments typename Gemm::Arguments args( {options.a_rows, options.n, options.a_cols}, tensor_a.device_ref(), tensor_b.device_ref(), tensor_c.device_ref(), tensor_d.device_ref(), tensor_ell_idx.device_data(), options.a_ell_num_columns, options.a_ell_blocksize, options.a_base, epilogue_op ); // Initialize the GEMM object Gemm gemm{}; result.status = gemm.initialize(args); if (result.status != cutlass::Status::kSuccess) { std::cerr << "Failed to initialize CUTLASS BlockedEll SpMM kernel." << std::endl; return result; } // Run the BlockedEll SpMM object result.status = gemm.run(); if (result.status != cutlass::Status::kSuccess) { std::cerr << "Failed to run CUTLASS BlockedEll SpMM kernel." << std::endl; return result; } // Wait for completion result.error = cudaDeviceSynchronize(); if (result.error != cudaSuccess) { std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error); return result; } // // Verify correctness // result.passed = true; if (options.reference_check) { result.passed = verify_(); } // // Warm-up run // result.status = gemm.run(); if (result.status != cutlass::Status::kSuccess) { std::cerr << "Failed to run CUTLASS BlockedEll SpMM kernel." 
<< std::endl; return result; } // // Construct events // cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return -1; } } // Record an event at the start of a series of GEMM operations result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // // Run profiling loop // for (int iter = 0; iter < options.iterations; ++iter) { gemm(); } // // Stop profiling loop // // Record an event when the GEMM operations have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Compute average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // // Cleanup // for (auto event : events) { (void)cudaEventDestroy(event); } std::cout << std::endl; std::cout << "ELL Block Sparse GEMM (CUTLASS):\n" << "====================================================" << std::endl; std::cout << std::endl; std::cout << " " << "Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout << " " << " GFLOPs: " << result.gflops << std::endl; return result; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { // // This example uses mma.sync to directly access Tensor Cores to achieve peak performance. // cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) { // // This example requires an NVIDIA Ampere-architecture GPU. 
// std::cout << "CUTLASS's BlockedEll SpMM example requires a GPU of NVIDIA's Ampere Architecture or " << "later (compute capability 80 or greater).\n"; return 0; } // // Parse options // Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } // // Define the BlockedEll type // using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; constexpr int32_t kAlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value; constexpr int32_t kAlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; constexpr int32_t kStages = 4; using Gemm = typename cutlass::gemm::device::EllGemm< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, ThreadblockShape, WarpShape, InstructionShape, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator>, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, kStages, kAlignmentA, kAlignmentB>; // // Profile it // Testbed<Gemm> testbed(options); if (!testbed.sufficient()) { std::cout << "The active CUDA device lacks sufficient hardware resources to execute this kernel.\n"; return 0; } Result result = testbed.profile(); if (!result.passed) { std::cout << "Profiling CUTLASS ELL block sparse GEMM has failed.\n"; std::cout << "\nFailed\n"; return -1; } std::cout << "\nPassed\n"; return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
examples/43_ell_block_sparse_gemm/ell_block_sparse_gemm.cu/0
{ "file_path": "examples/43_ell_block_sparse_gemm/ell_block_sparse_gemm.cu", "repo_id": "examples", "token_count": 9447 }
8
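The Blocked-ELL description at the top of ell_block_sparse_gemm.cu above can be made concrete with a tiny host-only expansion of the packed ellValue matrix plus its ellColInd block indices back into a dense matrix. This is conceptually what the example's verification path does before running a dense reference GEMM, but the C++ sketch below does not reproduce the interface of cutlass::uncompress_ell_block_sparse; the extents, the all-ones values, and the hand-written loops are assumptions chosen only to illustrate the storage format (including -1 marking an empty block).

// Host-only sketch of expanding a tiny Blocked-ELL matrix to dense form.
// Illustrative only; sizes and names below are assumptions.
#include <cstdio>
#include <vector>

int main() {
  const int a_rows = 4, a_cols = 8;  // dense extent of the sparse matrix
  const int blocksize = 2;           // a_ell_blocksize
  const int ell_cols = 4;            // a_ell_num_columns (packed columns per row)

  // ellValue: row-major, a_rows x ell_cols, non-zero blocks packed to the left.
  std::vector<float> ell_val(a_rows * ell_cols, 1.0f);

  // ellColInd: one column-block index per stored block, -1 marks an empty slot.
  // Shape: (a_rows / blocksize) x (ell_cols / blocksize) = 2 x 2.
  std::vector<int> ell_idx = { 0, 3,    // block-row 0 stores column-blocks 0 and 3
                               2, -1 }; // block-row 1 stores column-block 2 only

  std::vector<float> dense(a_rows * a_cols, 0.0f);
  const int block_rows = a_rows / blocksize;
  const int block_cols = ell_cols / blocksize;

  for (int br = 0; br < block_rows; ++br) {
    for (int bc = 0; bc < block_cols; ++bc) {
      int target = ell_idx[br * block_cols + bc];
      if (target < 0) continue;                  // empty block
      for (int i = 0; i < blocksize; ++i) {
        for (int j = 0; j < blocksize; ++j) {
          int r = br * blocksize + i;
          int packed_c = bc * blocksize + j;     // column inside ellValue
          int dense_c  = target * blocksize + j; // column inside the dense matrix
          dense[r * a_cols + dense_c] = ell_val[r * ell_cols + packed_c];
        }
      }
    }
  }

  // Print the reconstructed dense matrix: 1s appear only where blocks were stored.
  for (int r = 0; r < a_rows; ++r) {
    for (int c = 0; c < a_cols; ++c) std::printf("%.0f ", dense[r * a_cols + c]);
    std::printf("\n");
  }
  return 0;
}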
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# import gen_ir import helper class gen_default_b2b_mma: def __init__(self, template_param, gen_class_name, b2b_num,cutlass_deps_root, project_root): self.gen_class_name = "DefaultB2bMma" self.template_param = template_param self.b2b_num = b2b_num self.cutlass_deps_root = cutlass_deps_root self.project_root = project_root def gen_include_header(self): code = ''' /* Auto Generated code - Do not edit.*/ #pragma once #include \"{cutlass_dir}cutlass/cutlass.h\" #include \"{cutlass_dir}cutlass/numeric_types.h\" #include \"{cutlass_dir}cutlass/arch/arch.h\" #include \"{cutlass_dir}cutlass/transform/threadblock/predicated_tile_iterator.h\" #include \"{cutlass_dir}cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h\" #include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm70.h\" #include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm75.h\" #include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm80.h\" #include \"../threadblock/b2b_mma_pipelined.h\" #include \"../../fixed_impl/epilogue/threadblock/fused_bias_act_epilogue.h\" #include \"../../fixed_impl/epilogue/threadblock/default_bias_act_epilogue_tensor_op.h\" #include \"../../fixed_impl/gemm/warp/mma_tensor_op_fragment_iterator_without_output_op.h\" '''.format(cutlass_dir=self.cutlass_deps_root) return code def gen_using_MmaCore(self, stage): threadBlockShape = "ThreadblockShape" warpShape = "WarpShape" instrunctionShape = "InstructionShape" Mma_typename = "typename cutlass::gemm::threadblock::DefaultMmaCore" gen_code = "" for i in range(self.b2b_num): code_using = "using MmaCore" + str(i) gen_code += code_using + " = " + gen_ir.gen_declare_template_struct(Mma_typename, \ helper.var_idx(threadBlockShape, i), 
helper.var_idx(warpShape, i), instrunctionShape, \ "ElementA", "LayoutA", \ helper.var_idx("ElementB", i), helper.var_idx("LayoutB", i), \ helper.var_idx("ElementAccumulator", i), "layout::RowMajor", \ "OperatorClass", str(stage), "Operator") return gen_code def gen_using_FusedAddBiasEpilogue(self): gen_code = "" for i in range(self.b2b_num - 1): code_using = helper.var_idx("using FusedAddBiasEpilogue", i) epilogue_name = "typename cutlass::epilogue::threadblock::DefaultFusedBiasActEpilogueTensorOp" template_args = helper.var_idx("<ThreadblockShape", i) + helper.var_idx(",typename MmaCore", i) + helper.var_idx("::MmaPolicy::Operator, 1, EpilogueOutputOp", i) + ", 2>::Epilogue" gen_code += code_using + " = " + epilogue_name + template_args + ";\n" return gen_code def gen_using_Iterator(self): code_using = "using IteratorA0" iterator_typename = "cutlass::transform::threadblock::PredicatedTileIterator" MmaCore = "MmaCore0" matrix_shape = "cutlass::MatrixShape<" + MmaCore + "::Shape::kM, " + MmaCore + "::Shape::kK>" iterator_map = "typename " + MmaCore + "::IteratorThreadMapA" gen_code = code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, \ matrix_shape, "ElementA", "LayoutA", "1", iterator_map, "AlignmentA_") for i in range(self.b2b_num): code_using = "using IteratorB" + str(i) iterator_typename = "cutlass::transform::threadblock::PredicatedTileIterator" MmaCore = "MmaCore" + str(i) matrix_shape = "cutlass::MatrixShape<" + MmaCore + "::Shape::kK, " + MmaCore + "::Shape::kN>" iterator_map = "typename " + MmaCore + "::IteratorThreadMapB" gen_code += code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, \ matrix_shape, helper.var_idx("ElementB", i), helper.var_idx("LayoutB", i), "0", iterator_map, "AlignmentB_") return gen_code def gen_fragment_iterator(self): gen_code = "using AccumulatorLayout = cutlass::layout::ColumnMajor;\n" for i in range(1, self.b2b_num): code_using = "using FragmentIteratorA" + str(i) iterator_typename = "cutlass::gemm::warp::MmaTensorOpPureFragmentIterator" curr_MmaCore = "MmaCore" + str(i) prev_MmaCore = "MmaCore" + str(i - 1) Matrix_shape_curr = "cutlass::MatrixShape<" + curr_MmaCore + "::WarpShape::kM, " + curr_MmaCore + "::InstructionShape::kK>" Matrix_shape_prev = "cutlass::MatrixShape<" + prev_MmaCore + "::WarpShape::kM, " + prev_MmaCore + "::WarpShape::kN>" Curr_shape_kK = curr_MmaCore + "::Shape::kK" gen_code += code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, \ Matrix_shape_curr, Matrix_shape_prev, Curr_shape_kK, \ helper.var_idx("ElementAccumulator", i-1), "ElementA", \ "AccumulatorLayout", "InstructionShape_", "true") return gen_code def gen_threadblockmma(self): code_using = "using ThreadblockB2bMma" iterator_typename = "cutlass::gemm::threadblock::B2bMmaPipelined" MmaPipelined_param_Mma0_shape = "typename MmaCore0::Shape" MmaPipelined_param_Mma0_iteratorA = "IteratorA0" MmaPipelined_param_Mma0_smemIteratorA = "typename MmaCore0::SmemIteratorA" MmaPipelined_param_Mma0_iteratorB = "IteratorB0" MmaPipelined_param_Mma0_smemIteratorB = "typename MmaCore0::SmemIteratorB" MmaPipelined_param_list = MmaPipelined_param_Mma0_shape + ", " + MmaPipelined_param_Mma0_iteratorA + ", " + MmaPipelined_param_Mma0_smemIteratorA + ", " + MmaPipelined_param_Mma0_iteratorB + ", " + MmaPipelined_param_Mma0_smemIteratorB + ", " for i in range(1, self.b2b_num): MmaPipelined_param_Mma_shape = "typename MmaCore" + str(i) + "::Shape" MmaPipelined_param_Mma_iteratorA = "FragmentIteratorA" + str(i) 
MmaPipelined_param_Mma_iteratorB = "IteratorB" + str(i) MmaPipelined_param_Mma_smemIteratorB = "typename MmaCore" + str(i) + "::SmemIteratorB" MmaPipelined_param_list += MmaPipelined_param_Mma_shape + ", " + MmaPipelined_param_Mma_iteratorA + ", " + MmaPipelined_param_Mma_iteratorB + ", " + MmaPipelined_param_Mma_smemIteratorB + ", " MmaPipelined_param_list += "ElementAccumulator0, layout::RowMajor, " for i in range(self.b2b_num - 1): epilogue_name = "EpilogueOutputOp" + str(i) MmaPipelined_param_list += epilogue_name + ", " for i in range(self.b2b_num - 1): epilogue_name = "FusedAddBiasEpilogue" + str(i) MmaPipelined_param_list += epilogue_name + ", " for i in range(self.b2b_num): MmaPolicy = "typename MmaCore" + str(i) + "::MmaPolicy" MmaPipelined_param_list += MmaPolicy + ", " cnt = 0 for i in range(self.b2b_num): MmaStage = helper.var_idx("Stages", i) final = ", " if cnt == self.b2b_num - 1: final = "" MmaPipelined_param_list += MmaStage + final cnt += 1 gen_code = code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, MmaPipelined_param_list) return gen_code def gen_code(self): gen_using = '' # Generate default template struct gen_code = gen_ir.gen_template_struct(self.gen_class_name, self.template_param, "", speicalized = None, set_default=False) # Generate specialized template struct mmacore_codebody = self.gen_using_MmaCore(2) iterator_codebody = self.gen_using_Iterator() fragment_iterator_codebody = self.gen_fragment_iterator() epilogue_iterator_codebody = self.gen_using_FusedAddBiasEpilogue() threadBlockMma = self.gen_threadblockmma() specialized_code = mmacore_codebody + iterator_codebody + fragment_iterator_codebody + epilogue_iterator_codebody + threadBlockMma # Specialize layout C -> cutlass::layout::RowMajor rtn_template_args, speicalized_template_args = gen_ir.filtered_param(self.template_param, [ ('LayoutD', "cutlass::layout::RowMajor")], keep_= True) gen_speical_code = gen_ir.gen_template_struct(self.gen_class_name, rtn_template_args, specialized_code, speicalized = speicalized_template_args, set_default=False) code = gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("threadblock", gen_code + gen_speical_code))) return self.gen_include_header() + code class gen_b2b_mme_pipelined: def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root): self.gen_class_name = "B2bMmaPipelined" self.template_param = template_param self.b2b_num = b2b_num self.cutlass_deps_root = cutlass_deps_root self.project_root = project_root def gen_include_header(self): code = ''' #pragma once #include \"{cutlass_dir}cutlass/cutlass.h\" #include \"{cutlass_dir}cutlass/array.h\" #include \"{cutlass_dir}cutlass/aligned_buffer.h\" #include \"{cutlass_dir}cutlass/numeric_conversion.h\" #include \"{cutlass_dir}cutlass/numeric_types.h\" #include \"{cutlass_dir}cutlass/matrix_shape.h\" #include \"{cutlass_dir}cutlass/gemm/gemm.h\" #include \"{cutlass_dir}cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h\" #include \"../threadblock/b2b_mma_base.h\"\n'''.format(cutlass_dir = self.cutlass_deps_root) return code def gen_using(self): code_using = "using FragmentA0 = typename IteratorA0::Fragment;\n" code_using += "using Base = B2bMmaBase<" for i in range(self.b2b_num): code_using += helper.var_idx("Shape", i) + "_, " for i in range(self.b2b_num): code_using += helper.var_idx("Policy", i) + "_, " for i in range(self.b2b_num): code_using += helper.var_idx("Stage", i) + "_, " code_using = code_using[: -2] + ">;\n" for i in 
range(self.b2b_num): code_using += helper.var_idx("using FragmentB", i) + helper.var_idx(" = typename IteratorB", i) + "::Fragment;\n" code_using += helper.var_idx("using FragmentC", i) + helper.var_idx(" = typename Policy", i) + "::Operator::FragmentC;\n" code_using += helper.var_idx("using Operator", i) + helper.var_idx(" = typename Policy", i) + "::Operator;\n" for i in range(self.b2b_num - 1): code_using += helper.var_idx("using IteratorC", i) + helper.var_idx(" = typename FusedAddBiasEpilogue", i) + "::OutputTileIterator;\n" code_using += "using ArchTag = typename Policy0::Operator::ArchTag;\n" code_using += "static ComplexTransform const kTransformA0 = Operator0::kTransformA;\n" for i in range(self.b2b_num): code_using += helper.var_idx("static ComplexTransform const kTransformB", i) + helper.var_idx(" = Operator", i) + "::kTransformB;\n" code_using += "private:\n" code_using += "using WarpFragmentA0 = typename Operator0::FragmentA;\n" code_using += "using WarpFragmentB0 = typename Operator0::FragmentB;\n" for i in range(1, self.b2b_num): code_using += helper.var_idx("using WarpFragmentA", i) + helper.var_idx(" = typename FragmentIteratorA", i) + "::Fragment;\n" code_using += helper.var_idx("using WarpFragmentB", i) + helper.var_idx(" = typename Operator", i) + "::FragmentB;\n" code_using += "protected:\n" code_using += "SmemIteratorA0 smem_iterator_A_;\n" for i in range(self.b2b_num): code_using += helper.var_idx("SmemIteratorB", i) + helper.var_idx(" smem_iterator_B", i) + "_;\n" return code_using def gen_operator(self, first_use_1stage = False): code = "" def gen_operator_param(b2b_num): param_code = "" param_code += "int gemm_k_iterations_0,\n" param_code += helper.var_idx("FragmentC", b2b_num-1) + helper.var_idx(" &accum", b2b_num-1) + ",\n" param_code += "IteratorA0 iterator_A,\n" for i in range(b2b_num): param_code += helper.var_idx("IteratorB", i) + " " + helper.var_idx("iterator_B", i) + ",\n" param_code += "FragmentC0 const &src_accum, \n" for i in range(b2b_num - 1): param_code += helper.var_idx("OutputOp", i) + " " + helper.var_idx("output_op_", i) + ",\n" for i in range(b2b_num - 1): param_code += helper.var_idx("FusedAddBiasEpilogue", i) + " " + helper.var_idx("epilogue_", i) + ",\n" for i in range(b2b_num - 1): param_code += helper.var_idx("IteratorC", i) + " " + helper.var_idx("iterator_C", i) + ",\n" param_code += "TransformA0 transform_A0 = TransformA0(), \n" for i in range(b2b_num): final = "(),\n" if i == b2b_num - 1: final = "()\n" param_code += helper.var_idx("TransformB", i) + " " + helper.var_idx("transform_B", i) + " = " +helper.var_idx("TransformB", i) + final return param_code def gen_first_gemm_1stage(b2b_num): accu_code = " FragmentC0 accum0 = src_accum;\n" if b2b_num == 1: accu_code = " accum0 = src_accum;\n" code ="\ \n\ FragmentA0 tb_frag_A;\n\ FragmentB0 tb_frag_B0;\n\ \n\ int smem_write_stage_idx = 1;\n\ \n\ tb_frag_A.clear();\n\ tb_frag_B0.clear();\n\ \n\ // The last kblock is loaded in the prolog\n\ iterator_A.load(tb_frag_A);\n\ iterator_B0.load(tb_frag_B0);\n\ \n\ ++iterator_A;\n\ ++iterator_B0;\n\ \n\ WarpFragmentA0 warp_frag_A0;\n\ WarpFragmentB0 warp_frag_B0;\n\ \n\ Operator0 warp_mma0;\n\ \n\ // Avoid reading out of bounds\n\ if (gemm_k_iterations_0 <= 1) {\n\ iterator_A.clear_mask();\n\ iterator_B0.clear_mask();\n\ }\n\ \n\ // Issue loads during the first warp-level matrix multiply-add *AFTER* issuing \n\ // shared memory loads (which have the tightest latency requirement).\n\ \n\ //\n\ // Mainloop\n\ //\n\ \n\ // Note: The main loop does 
not support Base::WarpGemmIterations == 2.\n\ CUTLASS_GEMM_LOOP\n\ for (; gemm_k_iterations_0 > 0; --gemm_k_iterations_0) {\n\ \n\ this->smem_iterator_A_.store(tb_frag_A);\n\ this->smem_iterator_B0_.store(tb_frag_B0);\n\ \n\ __syncthreads();\n\ //\n\ // Loop over GEMM K dimension\n\ //\n\ \n\ CUTLASS_PRAGMA_UNROLL\n\ for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) {\n\ \n\ // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group\n\ // as the case may be.\n\ \n\ this->warp_tile_iterator_A0_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations0);\n\ this->warp_tile_iterator_B0_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations0);\n\ \n\ this->warp_tile_iterator_A0_.load(warp_frag_A0);\n\ this->warp_tile_iterator_B0_.load(warp_frag_B0);\n\ \n\ ++this->warp_tile_iterator_A0_;\n\ ++this->warp_tile_iterator_B0_;\n\ \n\ warp_mma0(accum0, warp_frag_A0, warp_frag_B0, accum0);\n\ }\n\ this->warp_tile_iterator_A0_.add_tile_offset({0, -Policy0::kPartitionsK * Base::kWarpGemmIterations0});\n\ this->warp_tile_iterator_B0_.add_tile_offset({-Policy0::kPartitionsK * Base::kWarpGemmIterations0, 0});\n\ \n\ __syncthreads();\n\ iterator_A.load(tb_frag_A);\n\ iterator_B0.load(tb_frag_B0);\n\ \n\ ++iterator_A;\n\ ++iterator_B0;\n\ \n\ if(gemm_k_iterations_0 <= 2) {\n\ iterator_A.clear_mask();\n\ iterator_B0.clear_mask();\n\ }\n\ }\n" return accu_code + code def gen_first_gemm_2stage(b2b_num): accu_code = " FragmentC0 accum0 = src_accum;\n" if b2b_num == 1: accu_code = " accum0 = src_accum;\n" code ="\ \n\ FragmentA0 tb_frag_A;\n\ FragmentB0 tb_frag_B0;\n\ \n\ tb_frag_A.clear();\n\ tb_frag_B0.clear();\n\ \n\ // The last kblock is loaded in the prolog\n\ iterator_A.load(tb_frag_A);\n\ iterator_B0.load(tb_frag_B0);\n\ \n\ ++iterator_A;\n\ ++iterator_B0;\n\ \n\ this->smem_iterator_A_.store(tb_frag_A);\n\ this->smem_iterator_B0_.store(tb_frag_B0);\n\ \n\ ++this->smem_iterator_A_;\n\ ++this->smem_iterator_B0_;\n\ \n\ __syncthreads();\n\ \n\ // Pair of fragments used to overlap shared memory loads and math instructions\n\ WarpFragmentA0 warp_frag_A0[2];\n\ WarpFragmentB0 warp_frag_B0[2];\n\ \n\ this->warp_tile_iterator_A0_.set_kgroup_index(0);\n\ this->warp_tile_iterator_B0_.set_kgroup_index(0);\n\ \n\ this->warp_tile_iterator_A0_.load(warp_frag_A0[0]);\n\ this->warp_tile_iterator_B0_.load(warp_frag_B0[0]);\n\ \n\ ++this->warp_tile_iterator_A0_;\n\ ++this->warp_tile_iterator_B0_;\n\ \n\ Operator0 warp_mma0;\n\ \n\ int smem_write_stage_idx = 1;\n\ \n\ // Avoid reading out of bounds\n\ if (gemm_k_iterations_0 <= 1) {\n\ iterator_A.clear_mask();\n\ iterator_B0.clear_mask();\n\ }\n\ \n\ // Issue loads during the first warp-level matrix multiply-add *AFTER* issuing \n\ // shared memory loads (which have the tightest latency requirement).\n\ iterator_A.load(tb_frag_A);\n\ \n\ //\n\ // Mainloop\n\ //\n\ \n\ // Note: The main loop does not support Base::WarpGemmIterations == 2.\n\ CUTLASS_GEMM_LOOP\n\ for (; gemm_k_iterations_0 > 0; --gemm_k_iterations_0) {\n\ \n\ //\n\ // Loop over GEMM K dimension\n\ //\n\ \n\ CUTLASS_PRAGMA_UNROLL\n\ for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) {\n\ \n\ // Load warp-level tiles from shared memory, wrapping to k offset if this is the last group\n\ // as the case may be.\n\ \n\ if (warp_mma_k == Base::kWarpGemmIterations0 - 1) {\n\ \n\ // Write fragments to shared memory\n\ this->smem_iterator_A_.store(tb_frag_A);\n\ \n\ this->smem_iterator_B0_.store(tb_frag_B0);\n\ \n\ 
__syncthreads();\n\ \n\ // Issue loads during the first warp-level matrix multiply-add *AFTER* issuing \n\ // shared memory loads (which have the tightest latency requirement).\n\ iterator_A.load(tb_frag_A);\n\ \n\ ++this->smem_iterator_B0_;\n\ ++this->smem_iterator_A_;\n\ \n\ \n\ // Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory\n\ if (smem_write_stage_idx == 1) {\n\ this->smem_iterator_A_.add_tile_offset({0, -Base::Stage0});\n\ this->smem_iterator_B0_.add_tile_offset({-Base::Stage0, 0});\n\ }\n\ else {\n\ this->warp_tile_iterator_A0_.add_tile_offset(\n\ {0, -Base::Stage0 * Policy0::kPartitionsK * Base::kWarpGemmIterations0});\n\ this->warp_tile_iterator_B0_.add_tile_offset(\n\ {-Base::Stage0 * Policy0::kPartitionsK * Base::kWarpGemmIterations0,\n\ 0});\n\ }\n\ \n\ smem_write_stage_idx ^= 1;\n\ }\n\ \n\ this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);\n\ this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);\n\ \n\ this->warp_tile_iterator_A0_.load(warp_frag_A0[(warp_mma_k + 1) % 2]);\n\ this->warp_tile_iterator_B0_.load(warp_frag_B0[(warp_mma_k + 1) % 2]);\n\ \n\ ++this->warp_tile_iterator_A0_;\n\ ++this->warp_tile_iterator_B0_;\n\ \n\ if (warp_mma_k == 0) {\n\ \n\ iterator_B0.load(tb_frag_B0);\n\ \n\ ++iterator_A;\n\ ++iterator_B0;\n\ \n\ // Avoid reading out of bounds if this was the last loop iteration\n\ if (gemm_k_iterations_0 <= 2) {\n\ iterator_A.clear_mask();\n\ iterator_B0.clear_mask();\n\ }\n\ }\n\ \n\ warp_mma0(accum0, warp_frag_A0[warp_mma_k % 2], warp_frag_B0[warp_mma_k % 2], accum0);\n\ }\n\ }\n" return accu_code + code def gen_other_gemms_2stage(b2b_num): code = "" def gemm_teamplate(id): code = "// " + str(id + 1) + " Gemm" code += " /// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile\n" code += " " + helper.var_idx("FragmentC", id - 1) + helper.var_idx(" after_epilogue_accu", id - 1) + ";\n" code += " " + helper.var_idx("epilogue_", id - 1) + helper.var_idx("(output_op_", id - 1) + helper.var_idx(", accum", id - 1) \ + helper.var_idx(", after_epilogue_accu", id - 1) + helper.var_idx(", iterator_C", id - 1) +");\n" # FragmentIteratorA1 warp_tile_iterator_A1_(accum0); code += " " + helper.var_idx("FragmentIteratorA", id) + helper.var_idx(" warp_tile_iterator_A", id) +"_(" + helper.var_idx("after_epilogue_accu", id - 1) + ");\n" # FragmentB1 tb_frag_B1; code += " " + helper.var_idx("FragmentB", id) + " " + helper.var_idx("tb_frag_B", id) + ";\n" # tb_frag_B1.clear(); code += " " + helper.var_idx("tb_frag_B", id) + ".clear();\n" # iterator_B1.load(tb_frag_B1); code += " " + helper.var_idx("iterator_B", id) + ".load(" + helper.var_idx("tb_frag_B", id) + ");\n" # ++iterator_B1; code += " " + "++" + helper.var_idx("iterator_B", id) + ";\n" # this->smem_iterator_B1_.store(tb_frag_B1); code += " " + helper.var_idx("this->smem_iterator_B", id) + "_.store(" + helper.var_idx("tb_frag_B", id) + ");\n" # ++this->smem_iterator_B1_; code += " " + helper.var_idx("++this->smem_iterator_B", id) + "_;\n" # __syncthreads(); code += " " + "__syncthreads();\n" # WarpFragmentA1 warp_frag_A1[2]; code += " " + helper.var_idx("WarpFragmentA", id) + helper.var_idx(" warp_frag_A", id) + "[2];\n" # WarpFragmentB1 warp_frag_B1[2]; code += " " + helper.var_idx("WarpFragmentB", id) + helper.var_idx(" warp_frag_B", id) + "[2];\n" # this->warp_tile_iterator_B1_.set_kgroup_index(0); code += " " + 
helper.var_idx("this->warp_tile_iterator_B", id) + "_.set_kgroup_index(0);\n" # warp_tile_iterator_A1_.load(warp_frag_A1[0], output_op_0); code += " " + helper.var_idx("warp_tile_iterator_A", id) + helper.var_idx("_.load(warp_frag_A", id) + "[0]);\n" # this->warp_tile_iterator_B1_.load(warp_frag_B1[0]); code += " " + helper.var_idx("this->warp_tile_iterator_B", id) + helper.var_idx("_.load(warp_frag_B", id) + "[0]);\n" # ++warp_tile_iterator_A1_; code += " " + helper.var_idx("++warp_tile_iterator_A", id) + "_;\n" # ++this->warp_tile_iterator_B1_; code += " " + helper.var_idx("++this->warp_tile_iterator_B", id) + "_;\n" # Operator1 warp_mma1; code += " " + helper.var_idx("Operator", id) + " " + helper.var_idx("warp_mma", id) + ";\n" # smem_write_stage_idx = 1; code += " " + "smem_write_stage_idx = 1;\n" # int gemm_k_iterations_1 = FragmentIteratorA1::Policy::kIterations / Base::kWarpGemmIterations1; code += " " + helper.var_idx("int gemm_k_iterations_", id) + " = " + helper.var_idx("FragmentIteratorA", id) + helper.var_idx("::Policy::kIterations / Base::kWarpGemmIterations", id) +";\n" # if (gemm_k_iterations_1 <= 1) { # iterator_B1.clear_mask(); # } code += " " + "if (" + helper.var_idx("gemm_k_iterations_", id) + " <= 1 ){\n" \ + " " + " " + helper.var_idx("iterator_B", id) + ".clear_mask();\n" \ + " " +"}\n" # CUTLASS_PRAGMA_UNROLL code += " " + "CUTLASS_PRAGMA_UNROLL\n" # for (; gemm_k_iterations_1 > 0; --gemm_k_iterations_1) { code += " " + helper.var_idx("for (; gemm_k_iterations_", id) + helper.var_idx(" > 0; --gemm_k_iterations_", id) + ") {\n" # CUTLASS_PRAGMA_UNROLL code += " " + " " + "CUTLASS_PRAGMA_UNROLL\n" # for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) { code += " " + " " + helper.var_idx("for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations", id) + "; ++warp_mma_k) {\n" # if (warp_mma_k == Base::kWarpGemmIterations1 - 1) { code += " " + " " + " " + helper.var_idx("if (warp_mma_k == Base::kWarpGemmIterations", id) + " - 1) {\n" # this->smem_iterator_B1_.store(tb_frag_B1); code += " " + " " + " " + " " + helper.var_idx(" this->smem_iterator_B", id) + helper.var_idx("_.store(tb_frag_B", id) + ");\n" # __syncthreads(); code += " " + " " + " " + " " + "__syncthreads();\n" # ++smem_iterator_B1_; code += " " + " " + " " + " " + helper.var_idx(" ++smem_iterator_B", id) + "_;\n" # if (smem_write_stage_idx == 1) { # smem_iterator_B1_.add_tile_offset({-Base::Stage, 0}); # } code += " " + " " + " " + " " + "if ( smem_write_stage_idx == 1 ) {\n" \ + " " + " " + " " + " " + " " + helper.var_idx("smem_iterator_B", id) + helper.var_idx("_.add_tile_offset({-Base::Stage", i) + ", 0});\n" \ + " " + " " + " " + " " +"}\n" # else { # this->warp_tile_iterator_B1_.add_tile_offset( # {-Base::Stage * Policy1::kPartitionsK * # Base::kWarpGemmIterations1, # 0}); # } code += " " + " " + " " + " " + "else {\n" \ + " " + " " + " " + " " + " " + helper.var_idx("this->warp_tile_iterator_B", id) + "_.add_tile_offset(\n" \ + " " + " " + " " + " " + " " + helper.var_idx("{-Base::Stage", id) + helper.var_idx(" * Policy", id) + "::kPartitionsK *\n" \ + " " + " " + " " + " " + " " + helper.var_idx("Base::kWarpGemmIterations", id) + ",\n" \ + " " + " " + " " + " " + " " + "0});\n" \ + " " + " " + " " + " " + "}\n" # smem_write_stage_idx ^= 1; # } code += " " + " " + " " + " " + "smem_write_stage_idx ^= 1;\n" \ + " " + " " + " " + "}\n" # this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1); code += " " + " " + " " + 
helper.var_idx("this->warp_tile_iterator_B", id) + helper.var_idx("_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations", id) + ");\n" # warp_tile_iterator_A1_.load(warp_frag_A1[(warp_mma_k + 1) % 2], output_op_0); code += " " + " " + " " + helper.var_idx("warp_tile_iterator_A", id) + helper.var_idx("_.load(warp_frag_A", id) + "[(warp_mma_k + 1) % 2]);\n" # this->warp_tile_iterator_B1_.load(warp_frag_B1[(warp_mma_k + 1) % 2]); code += " " + " " + " " + helper.var_idx("this->warp_tile_iterator_B", id) + helper.var_idx("_.load(warp_frag_B", id) + "[(warp_mma_k + 1) % 2]);\n" # ++warp_tile_iterator_A1_; code += " " + " " + " " + helper.var_idx("++warp_tile_iterator_A", id) + "_;\n" # ++this->warp_tile_iterator_B1_; code += " " + " " + " " + helper.var_idx("++this->warp_tile_iterator_B", id) + "_;\n" # if (warp_mma_k == 0) { # iterator_B1.load(tb_frag_B1); # ++iterator_B1; # if (gemm_k_iterations_1 <= 2) { # iterator_B1.clear_mask(); # } # } code += " " + " " + " " + " if (warp_mma_k == 0) {\n" \ + " " + " " + " " + " " + helper.var_idx("iterator_B", id) + helper.var_idx(".load(tb_frag_B", id) + ");\n" \ + " " + " " + " " + " " + helper.var_idx("++iterator_B", id) +";\n" \ + " " + " " + " " + " " + helper.var_idx("if (gemm_k_iterations_", id) +" <= 2) {\n" \ + " " + " " + " " + " " + " " + helper.var_idx("iterator_B", id) + ".clear_mask();\n" \ + " " + " " + " " + " " + "}\n" \ + " " + " " + " " + "}\n" # warp_mma1(accum, warp_frag_A1[warp_mma_k % 2], warp_frag_B1[warp_mma_k % 2], accum); # } # } code += " " + " " + " " + helper.var_idx("warp_mma", id) + helper.var_idx("(accum", id) + helper.var_idx(", warp_frag_A", id) + helper.var_idx("[warp_mma_k % 2], warp_frag_B", id) + helper.var_idx("[warp_mma_k % 2], accum", id) + ");\n" \ + " " + " " + "}\n" \ + " " + "}\n\n\n" return code for i in range (1, b2b_num): clear_accu = "" if i != b2b_num - 1: clear_accu = " " + helper.var_idx("FragmentC", i) + helper.var_idx(" accum", i) +";\n" clear_accu += " " + helper.var_idx("accum", i) +".clear();\n" code += clear_accu + gemm_teamplate(i) return code operator_code = " CUTLASS_DEVICE\n\ void operator()(\n " + gen_operator_param(self.b2b_num) + ") {\n" if first_use_1stage: operator_code += gen_first_gemm_1stage(self.b2b_num) else: operator_code += gen_first_gemm_2stage(self.b2b_num) operator_code += gen_other_gemms_2stage(self.b2b_num) + "}\n" return operator_code def gen_construct_func(self): name = self.gen_class_name func_code = "CUTLASS_DEVICE\n" func_code += name + "(\n" \ + " " + "typename Base::B2bMmaSharedStorage &shared_storage,\n" \ + " " + "int thread_idx,\n" \ + " " + "int warp_idx,\n" \ + " " + "int lane_idx\n" \ + "):\n" func_code += " " + "Base(shared_storage, thread_idx, warp_idx, lane_idx),\n" \ + " " + "smem_iterator_A_(shared_storage.sharedStorage0.operand_A_ref(), thread_idx),\n" for i in range(self.b2b_num): final = ",\n" if i == self.b2b_num - 1: final = " {\n" func_code += helper.var_idx("smem_iterator_B", i) + helper.var_idx("_(shared_storage.sharedStorage", i) +".operand_B_ref(), thread_idx)" + final func_code += " " + "int warp_idx_mn = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN);\n" func_code += " " + "int warp_idx_k = warp_idx / (Base::WarpCount0::kM * Base::WarpCount0::kN);\n" func_code += " " + "int warp_idx_m = warp_idx_mn % Base::WarpCount0::kM;\n" func_code += " " + "int warp_idx_n = warp_idx_mn / Base::WarpCount0::kM;\n" for i in range(self.b2b_num): func_code += " " + helper.var_idx("int tile_offset_k", i) + helper.var_idx(" = 
Base::kWarpGemmIterations", i) + " * warp_idx_k;\n" func_code += " " + "this->warp_tile_iterator_A0_.add_tile_offset({warp_idx_m, tile_offset_k0});\n" for i in range(self.b2b_num): func_code += " " + helper.var_idx("this->warp_tile_iterator_B", i) + helper.var_idx("_.add_tile_offset({tile_offset_k", i) + ", warp_idx_n});\n" func_code += "}\n" return func_code def gen_member_func(self, first_use_1stage): code = "public:\n" code += self.gen_operator(first_use_1stage) code += self.gen_construct_func() return code def gen_code(self, first_use_1stage): def gen_template_args(b2b_num): template_param = [] template_param.append(("typename", "Shape0")) template_param.append(("typename", "IteratorA0")) template_param.append(("typename", "SmemIteratorA0")) template_param.append(("typename", "IteratorB0")) template_param.append(("typename", "SmemIteratorB0")) for i in range(1, b2b_num): template_param.append(("typename", helper.var_idx("Shape", i))) template_param.append(("typename", helper.var_idx("FragmentIteratorA", i))) template_param.append(("typename", helper.var_idx("IteratorB", i))) template_param.append(("typename", helper.var_idx("SmemIteratorB", i))) template_param.append(("typename", "ElementC")) template_param.append(("typename", "LayoutC")) for i in range(0, b2b_num - 1): template_param.append(("typename", helper.var_idx("OutputOp", i))) for i in range(0, b2b_num - 1): template_param.append(("typename", helper.var_idx("FusedAddBiasEpilogue", i))) for i in range(0, b2b_num): template_param.append(("typename", helper.var_idx("Policy", i))) for i in range(0, b2b_num): template_param.append((int, helper.var_idx("Stage", i))) template_param.append(("typename","TransformA0", "NumericArrayConverter<typename SmemIteratorA0_::Element, typename IteratorA0_::Element, IteratorA0_::Fragment::kElements>")) for i in range(0, b2b_num): cvtr = helper.var_idx("NumericArrayConverter<typename SmemIteratorB", i) + helper.var_idx("_::Element, typename IteratorB", i) + helper.var_idx("_::Element, IteratorB", i) + "_::Fragment::kElements>" template_param.append(("typename", helper.var_idx("TransformB", i), cvtr)) template_param.append(("typename", "Enable", "bool")) return template_param template_param = gen_template_args(self.b2b_num) inheritance_code = "public B2bMmaBase<" for i in range(self.b2b_num): inheritance_code += helper.var_idx("Shape", i) + "_, " for i in range(self.b2b_num): inheritance_code += helper.var_idx("Policy", i) + "_, " for i in range(self.b2b_num - 1): inheritance_code += helper.var_idx("Stage", i) + "_, " inheritance_code += helper.var_idx("Stage", self.b2b_num - 1) + "_" inheritance_code += ">" code_body = "" using_code= self.gen_using() func_code = self.gen_member_func(first_use_1stage) code_body = using_code + func_code class_code = gen_ir.gen_template_class(self.gen_class_name, template_param, code_body, inheritance_code = inheritance_code) code = self.gen_include_header() code += gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("threadblock", class_code))) # print(code) return code class gen_b2b_mma_base: def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root): self.gen_class_name = gen_class_name self.template_param = template_param self.b2b_num = b2b_num self.cutlass_deps_root = cutlass_deps_root self.project_root = project_root def gen_include_header(self): code = ''' #pragma once #include \"{cutlass_dirs}cutlass/aligned_buffer.h\" #include \"{cutlass_dirs}cutlass/arch/memory.h\" #include 
\"{cutlass_dirs}cutlass/array.h\" #include \"{cutlass_dirs}cutlass/cutlass.h\" #include \"{cutlass_dirs}cutlass/gemm/gemm.h\" #include \"{cutlass_dirs}cutlass/matrix_shape.h\" #include \"{cutlass_dirs}cutlass/numeric_types.h\"\n'''.format(cutlass_dirs=self.cutlass_deps_root) return code def gen_shared_storage(self): code = \ " template< \n\ typename Shape_,\n\ typename Policy_,\n\ int ThisStage_\n\ >\n\ class SharedStorage {\n\ public:\n\ using Shape = Shape_;\n\ using Policy = Policy_;\n\ static int const ThisStage = ThisStage_;\n\ using Operator = typename Policy::Operator;\n\ \ using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;\n\ \ /// Tensor reference to the B operand \n\ using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;\n\ \n\ /// Shape of the A matrix operand in shared memory \n\ using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,\n\ Shape::kK * ThisStage +\n\ Policy::SmemPaddingA::kColumn>;\n\ \n\ /// Shape of the B matrix operand in shared memory\n\ using ShapeB =\n\ MatrixShape<Shape::kK * ThisStage + Policy::SmemPaddingB::kRow,\n\ Shape::kN + Policy::SmemPaddingB::kColumn>;\n\ \n\ public:\n\ \n\ /// Buffer for A operand\n\ AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;\n\ \n\ /// Buffer for B operand\n\ AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;\n\ \n\ public:\n\ \n\ /// Returns a layout object for the A matrix\n\ CUTLASS_DEVICE\n\ static typename Operator::LayoutA LayoutA() {\n\ return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});\n\ }\n\ \n\ /// Returns a layout object for the B matrix\n\ CUTLASS_HOST_DEVICE\n\ static typename Operator::LayoutB LayoutB() {\n\ return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});\n\ }\n\ \n\ /// Returns a TensorRef to the A operand\n\ CUTLASS_HOST_DEVICE\n\ TensorRefA operand_A_ref() {\n\ return TensorRefA{operand_A.data(), LayoutA()};\n\ }\n\ \n\ /// Returns a TensorRef to the B operand\n\ CUTLASS_HOST_DEVICE\n\ TensorRefB operand_B_ref() {\n\ return TensorRefB{operand_B.data(), LayoutB()};\n\ }\n\ CUTLASS_HOST_DEVICE\n\ void * get_B_Shared_ptr() {\n\ return operand_B.data();\n\ }\n\ };\n" return code def gen_using_and_misc(self, b2b_num): code_using = "" for i in range(b2b_num): code_using += "using Operator" +str(i) + " = typename Policy" + str(i) +"::Operator;\n" for i in range(b2b_num): code_using += "using WarpGemm" +str(i) + " = typename Policy" + str(i) +"::Operator::Shape;\n" for i in range(b2b_num): code_using += "using WarpCount" +str(i) + " = GemmShape<" + helper.var_idx("Shape", i) +"::kM / " + helper.var_idx("WarpGemm", i) +"::kM, "\ + helper.var_idx("Shape", i) +"::kN / " + helper.var_idx("WarpGemm", i) +"::kN, "\ + helper.var_idx("Shape", i) +"::kK / " + helper.var_idx("WarpGemm", i) +"::kK>;\n" code_misc = "" for i in range(b2b_num): code_misc += "static int const " + helper.var_idx("kWarpGemmIterations", i) + " = (" + helper.var_idx("WarpGemm", i) + "::kK / " + helper.var_idx("Operator", i) +"::Policy::MmaShape::kK);\n" code = code_using + code_misc + self.gen_shared_storage() for i in range(b2b_num): code += "using " + helper.var_idx("SharedStorage", i) + " = SharedStorage<" + helper.var_idx("Shape", i) + ", " + helper.var_idx("Policy", i) +", " + helper.var_idx("Stage", i) + ">;\n" def gen_union_shared_storage(b2b_num): code = "" for i in range(b2b_num): code += " " +helper.var_idx("SharedStorage", i) + " " + helper.var_idx("sharedStorage", i) +";\n" return code 
code += "union B2bMmaSharedStorage {\n" + gen_union_shared_storage(self.b2b_num) + "};\n" for i in range(b2b_num - 1): code += helper.var_idx("void * C", i) + "_smm_ptr;\n" return code def gen_protected(self): code = "\nprotected:\n" code += "typename Operator0::IteratorA warp_tile_iterator_A0_;\n" for i in range(self.b2b_num): code += "typename Operator" +str(i) + "::IteratorB" +" warp_tile_iterator_B" + str(i) + "_;\n" return code def gen_public_member(self): code = "\npublic:\n" code += "CUTLASS_DEVICE\n" code += \ "B2bMmaBase(\n" + \ " B2bMmaSharedStorage & shared_storage,\n" + \ " int thread_idx,\n" + \ " int warp_idx,\n" + \ " int lane_idx\n" + \ "):\n" + \ " warp_tile_iterator_A0_(shared_storage.sharedStorage0.operand_A_ref(), lane_idx),\n" for i in range(self.b2b_num): final = ",\n" if i == self.b2b_num-1: final = "\n" iterator = " warp_tile_iterator_B" + str(i) + "_" shared_storage = "shared_storage.sharedStorage" + str(i) + ".operand_B_ref()" code += iterator + "(" + shared_storage + ", lane_idx)" + final code += "{\n" for i in range(self.b2b_num - 1): code += helper.var_idx(" C", i) + helper.var_idx("_smm_ptr = shared_storage.sharedStorage", i) + ".get_B_Shared_ptr();\n" code += "}\n" return code def gen_code(self): template_arg = [] for i in range(self.b2b_num): template_arg.append(("typename", helper.var_idx("Shape", i))) for i in range(self.b2b_num): template_arg.append(("typename", helper.var_idx("Policy", i))) for i in range(self.b2b_num): template_arg.append((int, helper.var_idx("Stage", i))) code_body = self.gen_using_and_misc(self.b2b_num) code_body += self.gen_protected() code_body += self.gen_public_member() class_code = gen_ir.gen_template_class("B2bMmaBase", template_arg, code_body) code = self.gen_include_header() + gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("threadblock", class_code))) return code class gen_threadblock: def __init__(self, template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root): self.gen_class_name = gen_class_name self.template_param = template_param self.b2b_num = b2b_num self.file_dir = output_dir + "/threadblock/" self.cutlass_deps_root = cutlass_deps_root self.project_root = project_root self.gen_b2b_mma_base = gen_b2b_mma_base(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root) self.gen_b2b_mma_pipelined = gen_b2b_mme_pipelined(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root) self.gen_default_b2b_mma = gen_default_b2b_mma(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root) def gen_code(self, first_use_1stage): base_code = self.gen_b2b_mma_base.gen_code() print("[INFO]: Gen kernel code [b2b_mma_base.h]output Dir: is ", self.file_dir) with open(self.file_dir + "b2b_mma_base.h", "w+") as f: f.write(base_code) pipeline_code = self.gen_b2b_mma_pipelined.gen_code(first_use_1stage = first_use_1stage) print("[INFO]: Gen kernel code [b2b_mma_pipelined.h]output Dir: is ", self.file_dir) with open(self.file_dir + "b2b_mma_pipelined.h", "w+") as f: f.write(pipeline_code) default_code = self.gen_default_b2b_mma.gen_code() print("[INFO]: Gen kernel code [default_b2b_mma.h]output Dir: is ", self.file_dir) with open(self.file_dir + "default_b2b_mma.h", "w+") as f: f.write(default_code)
examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_threadblock.py/0
{ "file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_threadblock.py", "repo_id": "examples", "token_count": 24346 }
9
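gen_threadblock.py builds b2b_mma_base.h by string concatenation, which makes the emitted class hard to picture from the generator alone. Below is a hand-written, dependency-free C++ approximation of the skeleton gen_b2b_mma_base produces for b2b_num == 2 — a per-stage SharedStorage stub, the B2bMmaSharedStorage union, and the C0_smm_ptr member wired up in the constructor. The stub element type and extents are placeholders; the real generated header uses CUTLASS AlignedBuffer, TensorRef and warp/smem iterator types.

#include <cstdio>

// Stand-in for the generated SharedStorage<Shape_, Policy_, ThisStage_>;
// the real template wraps AlignedBuffer<Element, ShapeA/B::kCount> plus
// TensorRef accessors. The float element type and extents are arbitrary.
template <int Rows, int Cols>
struct SharedStorageStub {
  float operand_A[Rows * Cols];
  float operand_B[Rows * Cols];
  void* get_B_Shared_ptr() { return operand_B; }
};

struct B2bMmaBaseSketch {
  using SharedStorage0 = SharedStorageStub<16, 8>;
  using SharedStorage1 = SharedStorageStub<16, 16>;

  // gen_using_and_misc() emits a union over the per-stage storages.
  union B2bMmaSharedStorage {
    SharedStorage0 sharedStorage0;
    SharedStorage1 sharedStorage1;
  };

  // For b2b_num == 2 the generator adds a single C0_smm_ptr member...
  void* C0_smm_ptr = nullptr;

  // ...and gen_public_member() points it at sharedStorage0's B buffer.
  explicit B2bMmaBaseSketch(B2bMmaSharedStorage& shared_storage) {
    C0_smm_ptr = shared_storage.sharedStorage0.get_B_Shared_ptr();
  }
};

int main() {
  B2bMmaBaseSketch::B2bMmaSharedStorage storage;
  B2bMmaBaseSketch base(storage);
  std::printf("C0_smm_ptr = %p\n", base.C0_smm_ptr);
  return 0;
}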
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. 
*/ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/functional.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/numeric_types.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Epilogue operator template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM ///< Output operator typename OutputOp0_, typename OutputOp1_, typename OutputOp2_, typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) bool StoreD0 = true, bool StoreD1 = true, int FragmentsPerPartition = 1, ///< Used to coarsten the epilogue granularity int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large (!IsEpilogueFunctorHeavy<OutputOp0_>::value) > class DualEpilogue { public: using Base = EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition>; using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; static bool constexpr kStoreD0 = StoreD0; static bool constexpr kStoreD1 = StoreD1; using OutputTileIterator = OutputTileIterator_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using OutputOp0 = OutputOp0_; using OutputOp1 = OutputOp1_; using OutputOp2 = OutputOp2_; using Padding = Padding_; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = typename Base::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Output access size static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Array type used to output using OutputAccessType = Array< typename OutputTileIterator::Element, 
OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using AccumulatorAccessType = Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Number of warps using WarpCount = typename Base::WarpCount; struct SharedStorage { using Element = typename WarpTileIterator::Element; /// Tensor reference to shared memory allocation using TensorRef = typename WarpTileIterator::TensorRef; /// Logical shape of the shared memory tile written to by all warps. using Shape = typename Base::Shape; /// Shape of the shared memory allocation for the epilogue using StorageShape = typename Base::SharedStorage::StorageShape; // // Data members // AlignedBuffer<Element, StorageShape::kCount> storage[2]; // // Methods // /// Returns a tensor reference to the shared memory buffer CUTLASS_DEVICE TensorRef reference(int i) { return TensorRef( storage[i].data(), Layout::packed({StorageShape::kRow, StorageShape::kColumn})); } }; static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK; static int constexpr kSmemPointerOffset = SharedStorage::StorageShape::kCount / kSmemTiles; public: static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements, "Mismatch between shared load iterator and output tile iterator."); static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero."); static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess), "Divisibility"); private: /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator0_; SharedLoadIterator shared_load_iterator1_; /// Stores a warp's fragment of accumulators to SMEM WarpTileIterator warp_tile_iterator0_; WarpTileIterator warp_tile_iterator1_; public: /// Constructor CUTLASS_DEVICE DualEpilogue( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): shared_load_iterator0_(shared_storage.reference(0), thread_idx), shared_load_iterator1_(shared_storage.reference(1), thread_idx), warp_tile_iterator0_(shared_storage.reference(0), lane_idx), warp_tile_iterator1_(shared_storage.reference(1), lane_idx) { int warp_k = warp_idx / (WarpCount::kM * WarpCount::kN); int warp_mn = warp_idx % (WarpCount::kM * WarpCount::kN); int warp_m = warp_mn % WarpCount::kM; int warp_n = warp_mn / WarpCount::kM; MatrixCoord warp_offset{warp_k * WarpCount::kM + warp_m, warp_n}; warp_tile_iterator0_.add_tile_offset(warp_offset); warp_tile_iterator1_.add_tile_offset(warp_offset); } /// Streams the result to global memory CUTLASS_DEVICE void operator()( OutputOp0 const &output_op0, OutputOp1 const &output_op1, OutputOp2 const &output_op2, OutputTileIterator dest0, OutputTileIterator dest1, OutputTileIterator dest2, AccumulatorTile const &accumulator0, AccumulatorTile const &accumulator1, OutputTileIterator source_iterator[2], bool writeToD2 // true if it's the final split-k ) { // TODO: Implement when no source is needed typename OutputTileIterator::Fragment source_fragment[2]; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 2; ++i) { source_fragment[i].clear(); } // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator[2] = {accumulator0, accumulator1}; // // Iterate over accumulator tile // #pragma unroll(IterationsUnroll ? 
OutputTileIterator::kIterations : 1) for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) { // // Load the source // CUTLASS_PRAGMA_UNROLL for (int i = 0; i < 2; ++i) { source_iterator[i].load(source_fragment[i]); ++source_iterator[i]; } // // Convert and store fragment // __syncthreads(); acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push( iter, accum_fragment_iterator[0], this->warp_tile_iterator0_); acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push( iter, accum_fragment_iterator[1], this->warp_tile_iterator1_); __syncthreads(); // // Load fragments from shared memory // typename SharedLoadIterator::Fragment aligned_accum_fragment0[kPartitionsK]; typename SharedLoadIterator::Fragment aligned_accum_fragment1[kPartitionsK]; shared_load_iterator0_.load(aligned_accum_fragment0[0]); shared_load_iterator1_.load(aligned_accum_fragment1[0]); // If the number of k-slices is > 1 - perform a reduction amongst the k-slices if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator0_.add_pointer_offset(kSmemPointerOffset); shared_load_iterator1_.add_pointer_offset(kSmemPointerOffset); shared_load_iterator0_.load(aligned_accum_fragment0[i]); shared_load_iterator1_.load(aligned_accum_fragment1[i]); aligned_accum_fragment0[0] = add_fragments(aligned_accum_fragment0[0], aligned_accum_fragment0[i]); aligned_accum_fragment1[0] = add_fragments(aligned_accum_fragment1[0], aligned_accum_fragment1[i]); } shared_load_iterator0_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset); shared_load_iterator1_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset); } // // Compute the output result // typename OutputTileIterator::Fragment output_fragment[3]; apply_output_operator_(output_fragment, output_op0, output_op1, output_op2, aligned_accum_fragment0[0], aligned_accum_fragment1[0], source_fragment); // // Store the final result // if (kStoreD0) { dest0.store(output_fragment[0]); ++dest0; } if (kStoreD1) { dest1.store(output_fragment[1]); ++dest1; } if (writeToD2) { dest2.store(output_fragment[2]); ++dest2; } } } private: static_assert(kPartitionsK == 1 || Base::kFragmentsPerIteration == 1, "One of these must be exactly 1."); template<class Seq> struct acc2smem_source_needed; template <size_t... 
Seq> struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> { template<int Advance> CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, WarpTileIterator &warp_tile_iterator) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); warp_tile_iterator.store(accum_fragment); } CUTLASS_DEVICE static void push(size_t pos, AccumulatorFragmentIterator const &iterator_begin, WarpTileIterator &warp_tile_iterator) { int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...}; } }; /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_( typename OutputTileIterator::Fragment (&output_fragment)[3], OutputOp0 const &output_op0, OutputOp1 const &output_op1, OutputOp2 const &output_op2, typename SharedLoadIterator::Fragment const& aligned_accum_fragment0, typename SharedLoadIterator::Fragment const& aligned_accum_fragment1, typename OutputTileIterator::Fragment const (&source_fragment)[2]) { OutputAccessType* output_frag_ptr[3] = { reinterpret_cast<OutputAccessType *>(&output_fragment[0]), reinterpret_cast<OutputAccessType *>(&output_fragment[1]), reinterpret_cast<OutputAccessType *>(&output_fragment[2]) }; AccumulatorAccessType const *compute_frag_ptr[2] = { reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment0), reinterpret_cast<AccumulatorAccessType const *>(&aligned_accum_fragment1) }; OutputAccessType const *source_frag_ptr[2] = { reinterpret_cast<OutputAccessType const *>(&source_fragment[0]), reinterpret_cast<OutputAccessType const *>(&source_fragment[1]) }; int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { // Call the output operators output_frag_ptr[0][i] = output_op0(compute_frag_ptr[0][i], source_frag_ptr[0][i]); output_frag_ptr[1][i] = output_op1(compute_frag_ptr[1][i], source_frag_ptr[1][i]); output_frag_ptr[2][i] = output_op2(output_frag_ptr[0][i], output_frag_ptr[1][i]); } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
examples/45_dual_gemm/threadblock/dual_epilogue.h/0
{ "file_path": "examples/45_dual_gemm/threadblock/dual_epilogue.h", "repo_id": "examples", "token_count": 5594 }
10
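DualEpilogue::apply_output_operator_ above runs over vectorized OutputAccessType fragments; the per-element arithmetic is easier to see in scalar form. The sketch below assumes OutputOp0/OutputOp1 are alpha/beta linear combinations and OutputOp2 is an elementwise product — illustrative choices only, since the class merely requires the third operator to combine the first two results.

#include <cstdio>
#include <functional>

int main() {
  float alpha = 1.0f, beta = 0.5f;

  // Assumed per-element output ops; the real kernel applies them to whole fragments.
  auto output_op0 = [&](float acc, float src) { return alpha * acc + beta * src; };
  auto output_op1 = [&](float acc, float src) { return alpha * acc + beta * src; };
  auto output_op2 = std::multiplies<float>{};

  float acc0 = 2.0f, acc1 = 3.0f;  // accumulators from the two GEMMs
  float src0 = 4.0f, src1 = 6.0f;  // source (C) values loaded per output

  float d0 = output_op0(acc0, src0);  // stored to dest0 when kStoreD0
  float d1 = output_op1(acc1, src1);  // stored to dest1 when kStoreD1
  float d2 = output_op2(d0, d1);      // stored to dest2 only when writeToD2

  std::printf("D0=%g D1=%g D2=%g\n", d0, d1, d2);  // 4, 6, 24
  return 0;
}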
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cute/tensor.hpp" #include "cutlass/arch/arch.h" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/gemm_universal.hpp" #include "cutlass/gemm/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/collective_epilogue.hpp" #include "cutlass/epilogue/thread/linear_combination.h" namespace example { // // GETT entry point // template < class ProblemShapeMNKL, class ElementA, class StrideA, class ElementB, class StrideB, class ElementAccumulator, class ElementC, class StrideC, class ElementD, class StrideD, class ElementEpilogue> cutlass::Status gett_kernel( ProblemShapeMNKL problem_shape_mnkl, ElementA const* ptr_A, StrideA stride_a_mkl, ElementB const* ptr_B, StrideB stride_b_nkl, ElementAccumulator _, ElementC const* ptr_C, StrideC stride_c_mnl, ElementD * ptr_D, StrideD stride_d_mnl, ElementEpilogue alpha, ElementEpilogue beta, cudaStream_t stream = 0) { using namespace cute; // TileShape -- GETT configuration // Specify the number of elements to take from each mode // BLK_M = (M0,M1,...) BLK_N = (M0,M1,...) BLK_K = (K0,K1,...) 
// Take 128 from m0, 128 from n0, 64 from k0 using TileShape = Shape<Shape<_128>, Shape<_128>, Shape<_64>>; /* Other examples: * Take 32 elements from m0 and 4 elements from m1 * Take 64 elements from n0 and 2 elements from n1 * Take 8 elements from k0 and 8 elements from k1 **/ // using TileShape = Shape<Shape<_32,_4>, Shape<_64,_2>, Shape<_8,_8>>; using EpilogueThreadOp = cutlass::epilogue::thread::LinearCombination< ElementD, 1, ElementAccumulator, ElementEpilogue, cutlass::epilogue::thread::ScaleType::Default, cutlass::FloatRoundStyle::round_to_nearest, ElementC>; // No changes are required to the default epilogue using CollectiveEpilogue = cutlass::epilogue::collective::detail::Sm90TmaWarpSpecializedAdapter< cutlass::epilogue::collective::DefaultEpilogue< StrideC, StrideD, EpilogueThreadOp, cutlass::gemm::EpilogueDefault>>; // CollectiveMma for GETTs can be built using the CollectiveBuilders using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, StrideA, 128 / cutlass::sizeof_bits<ElementA>::value, ElementB, StrideB, 128 / cutlass::sizeof_bits<ElementB>::value, ElementAccumulator, TileShape, Shape<_1,_2,_1>, cutlass::gemm::collective::StageCountAutoCarveout< static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::collective::KernelScheduleAuto >::CollectiveOp; // The GETT kernel is a composition of a collective mainloop and epilogue, just like any 3.x GEMM using GettKernel = cutlass::gemm::kernel::GemmUniversal< ProblemShapeMNKL, CollectiveMainloop, CollectiveEpilogue>; using GettOperator = cutlass::gemm::device::GemmUniversalAdapter<GettKernel>; typename GettOperator::Arguments args { cutlass::gemm::GemmUniversalMode::kBatched, problem_shape_mnkl, { ptr_A, stride_a_mkl, ptr_B, stride_b_nkl }, { {alpha, beta}, ptr_C, stride_c_mnl, ptr_D, stride_d_mnl } }; #if CUTLASS_DEBUG_TRACE_LEVEL > 0 print("Problem shape:"); print("\tM: "); print(cute::get<0>(problem_shape_mnkl)); print("\n"); print("\tN: "); print(cute::get<1>(problem_shape_mnkl)); print("\n"); print("\tK: "); print(cute::get<2>(problem_shape_mnkl)); print("\n"); print("\tL: "); print(cute::get<3>(problem_shape_mnkl)); print("\n"); print("TileSape:"); print(TileShape{}); print("\n"); #endif GettOperator op; return op(args, stream); } } // namespace example
examples/51_hopper_gett/gett_kernel.cuh/0
{ "file_path": "examples/51_hopper_gett/gett_kernel.cuh", "repo_id": "examples", "token_count": 1911 }
11
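In gett_kernel above, each of the M, N, K and L roles in ProblemShapeMNKL may be a nested tuple of tensor modes, as the TileShape comments describe. The host-side sketch below uses made-up mode extents and shows the bookkeeping only: the product of the extents grouped under each role gives the flat batched GEMM the GETT is logically equivalent to.

#include <cstdio>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  // Hypothetical mode extents for a contraction mapped onto GETT.
  std::vector<int> m_modes = {24, 32};  // M = (M0, M1): row modes of A and D
  std::vector<int> n_modes = {48, 2};   // N = (N0, N1): column modes of B and D
  std::vector<int> k_modes = {16, 8};   // K = (K0, K1): contracted modes
  std::vector<int> l_modes = {3};       // L = (L0):     batch modes

  auto prod = [](std::vector<int> const& v) {
    return std::accumulate(v.begin(), v.end(), 1, std::multiplies<int>{});
  };

  // A GETT over these modes does the same work as this flat batched GEMM.
  std::printf("equivalent GEMM: M=%d N=%d K=%d L=%d\n",
              prod(m_modes), prod(n_modes), prod(k_modes), prod(l_modes));
  return 0;
}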
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cute/tensor.hpp" #include <cuda.h> #include "helper.h" template <class QuantizedElement, class DequantizedElement, class OperandLayout, class ElementScale, class ElementZero, class ScaleBroadCastLayout, class ThrLayout> __global__ void dequantize_weight_kernel(DequantizedElement* dq_buffer, QuantizedElement const* q_buffer, OperandLayout const operand_layout, ElementScale const* scale_buffer, ElementZero const* zero_buffer, ScaleBroadCastLayout const broadcasted_scale_layout, ThrLayout thr_layout) { using namespace cute; // Represent the full tensors to gmem elements. 
// These are expected to have shape [MN, K, L] Tensor gmem_op_dq = make_tensor(make_gmem_ptr(dq_buffer), operand_layout); auto init_quantized_iterator = [&]() { if constexpr (cute::sizeof_bits_v<QuantizedElement> >= 8) { return make_gmem_ptr(q_buffer); } else { return subbyte_iterator<const QuantizedElement>(q_buffer); } }; Tensor gmem_op_q = make_tensor(init_quantized_iterator(), operand_layout); // While the scales are expected to have shape [MN, G, L] but with a stride to allow broadcasting // It is expected that K % G == 0 Tensor gmem_scale_broadcasted = make_tensor(make_gmem_ptr(scale_buffer), broadcasted_scale_layout); Tensor gmem_zero_broadcasted = make_tensor(make_gmem_ptr(zero_buffer), broadcasted_scale_layout); // Assign 1 thread per element in the thread block auto blk_shape = make_shape(size<0>(thr_layout), _1{}, _1{}); // auto blk_coord = make_coord(_, blockIdx.x, blockIdx.y); // (MN, K, L) // Tile across the block auto gOp_dq = local_tile(gmem_op_dq, blk_shape, blk_coord); auto gScale = local_tile(gmem_scale_broadcasted, blk_shape, blk_coord); auto gZero = local_tile(gmem_zero_broadcasted, blk_shape, blk_coord); auto gOp_q = local_tile(gmem_op_q, blk_shape, blk_coord); auto tOpDq_gOpDq = local_partition(gOp_dq, thr_layout, threadIdx.x); auto tScale_gScale = local_partition(gScale, thr_layout, threadIdx.x); auto tZero_gZero = local_partition(gZero, thr_layout, threadIdx.x); auto tOpQ_gOpQ = local_partition(gOp_q, thr_layout, threadIdx.x); // Make a fragment of registers to hold gmem loads Tensor rmem_op_q = make_fragment_like(tOpQ_gOpQ(_, _, _, 0)); Tensor rmem_scale = make_fragment_like(tScale_gScale(_, _, _, 0)); Tensor rmem_zero = make_fragment_like(tZero_gZero(_, _, _, 0)); Tensor rmem_op_dq = make_fragment_like(tOpDq_gOpDq(_, _, _, 0)); Tensor rmem_op_scaled = make_fragment_like<ElementScale>(rmem_op_dq); Tensor rmem_zero_buf = make_fragment_like<ElementScale>(rmem_zero); Tensor pred_id = make_identity_tensor(shape(operand_layout)); auto pred_blk_tile = local_tile(pred_id, blk_shape, blk_coord); auto pred_thr_partition = local_partition(pred_blk_tile, thr_layout, threadIdx.x); const auto num_iters = size<3>(tOpDq_gOpDq); for (int ii = 0; ii < num_iters; ++ii) { const auto thread_offset = get<0>(pred_thr_partition(0, 0, 0, ii)); if (thread_offset < size<0>(operand_layout)) { copy(tOpQ_gOpQ(_, _, _, ii), rmem_op_q); copy(tScale_gScale(_, _, _, ii), rmem_scale); copy(tZero_gZero(_, _, _, ii), rmem_zero); transform(rmem_op_q, rmem_op_scaled, [] (const QuantizedElement& elt) { return ElementScale(elt); } ); transform(rmem_zero, rmem_zero_buf, [] (const ElementZero& elt) { return ElementScale(elt); } ); transform(rmem_op_scaled, rmem_scale, rmem_op_scaled, multiplies{}); transform(rmem_op_scaled, rmem_zero_buf, rmem_op_scaled, plus{}); transform(rmem_op_scaled, rmem_op_dq, [] (const ElementScale& elt) { return DequantizedElement(elt); } ); copy(rmem_op_dq, tOpDq_gOpDq(_, _, _, ii)); } } } template <class QuantizedElement, class DequantizedElement, class OperandLayout, class ElementScale, class ElementZero, class ScaleLayout> void dequantize_weight(DequantizedElement* dq_buffer, QuantizedElement const* q_buffer, OperandLayout const operand_layout, ElementScale const* scale_buffer, ElementZero const* zero_buffer, ScaleLayout const scale_layout, int const group_size) { using namespace cute; constexpr int tpb = 128; auto thr_layout = make_layout(make_shape(Int<tpb>{})); const auto num_rows = get<0>(shape(operand_layout)); const auto gemm_k = get<1>(shape(operand_layout)); // [MN, 
K, L] const auto batches = get<2>(shape(operand_layout)); // [MN, K, L] const auto scale_k = get<1>(shape(scale_layout)); // [MN, Scale_K, L] if (num_rows != size<0>(scale_layout)) { std::cerr << "Invalid first dimension for scales. Must match first dim for weights." << " But got shapes " << shape(operand_layout) << " " << shape(scale_layout) << std::endl; exit(-1); } const auto scale_stride0 = get<0>(stride(scale_layout)); const auto scale_stride1 = get<1>(stride(scale_layout)); const auto scale_stride2 = get<2>(stride(scale_layout)); auto scale_shape_bcast = make_shape(num_rows, make_shape(group_size, scale_k), batches); auto scale_stride_bcast = make_stride(scale_stride0, make_stride(0, scale_stride1), scale_stride2); auto scale_layout_bcast = make_layout(scale_shape_bcast, scale_stride_bcast); const auto blocks_x = gemm_k; const auto blocks_y = batches; dim3 blocks(blocks_x, blocks_y, 1); dequantize_weight_kernel<<<blocks, tpb>>>(dq_buffer, q_buffer, operand_layout, scale_buffer, zero_buffer, scale_layout_bcast, thr_layout); CUDA_CHECK(cudaDeviceSynchronize()); }
examples/55_hopper_mixed_dtype_gemm/unfused_weight_dequantize.hpp/0
{ "file_path": "examples/55_hopper_mixed_dtype_gemm/unfused_weight_dequantize.hpp", "repo_id": "examples", "token_count": 3052 }
12
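dequantize_weight above broadcasts one (scale, zero) pair across each group of group_size elements along K by giving the inner mode a stride of 0 in scale_layout_bcast. The plain host loop below makes the per-element math explicit; the sizes and values are made up, and int8_t/float stand in for the quantized, scale and zero element types.

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const int num_rows = 2, gemm_k = 8, group_size = 4;
  const int scale_k = gemm_k / group_size;  // assumes gemm_k % group_size == 0

  std::vector<int8_t> q(num_rows * gemm_k, 3);         // quantized weights, [MN, K] row-major
  std::vector<float>  scale(num_rows * scale_k, 0.5f); // one scale per (row, K-group)
  std::vector<float>  zero(num_rows * scale_k, 1.0f);  // one zero-point per (row, K-group)
  std::vector<float>  dq(num_rows * gemm_k);

  for (int r = 0; r < num_rows; ++r) {
    for (int k = 0; k < gemm_k; ++k) {
      int g = k / group_size;  // broadcast: the whole group shares one scale/zero
      dq[r * gemm_k + k] =
          float(q[r * gemm_k + k]) * scale[r * scale_k + g] + zero[r * scale_k + g];
    }
  }

  std::printf("dq[0] = %g (3 * 0.5 + 1)\n", dq[0]);  // prints 2.5
  return 0;
}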
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/util/type_traits.hpp> #include <cute/atom/mma_atom.hpp> #include <cute/algorithm/axpby.hpp> #include <cute/algorithm/functional.hpp> #include <cute/algorithm/gemm.hpp> #include <cute/tensor.hpp> namespace cute { // // Cooperative Shared-Memory GEMMs // namespace detail { // Predicated Cooperative GEMM template <class... 
Args, class Alpha, class TA, class ALayout, class TB, class BLayout, class Beta, class TC, class CLayout, class ALoadTransformOp, class BLoadTransformOp, class CLoadTransformOp, class CStoreTransformOp, __CUTE_REQUIRES(ALayout::rank == 2 && is_smem<TA>::value && BLayout::rank == 2 && is_smem<TB>::value && CLayout::rank == 2 && is_smem<TC>::value)> CUTE_HOST_DEVICE void cooperative_gemm_predication(ThrMMA<Args...> const& thr_mma, Alpha const& alpha, Tensor<TA, ALayout> sA, Tensor<TB, BLayout> sB, Beta const& beta, Tensor<TC, CLayout> sC, ALoadTransformOp const& sA_load_op, // transforms A values before use in GEMM BLoadTransformOp const& sB_load_op, // transforms B values before use in GEMM CLoadTransformOp const& sC_load_op, // transforms C values before use in GEMM CStoreTransformOp const& sC_store_op) // transforms results before they are stored to C { using TypeA = typename TA::value_type; using TypeB = typename TB::value_type; using TypeC = typename TC::value_type; // Original, static size of the problem auto M = size<0>(sC); auto N = size<1>(sC); auto K = size<1>(sA); // Block size of the compute tile auto BLK_M = tile_size<0>(thr_mma); auto BLK_N = tile_size<1>(thr_mma); auto BLK_K = tile_size<2>(thr_mma); // // MMA Partitioning // // Round the layout extents up to BLK_X to satisfy MMA partitioning safety Tensor rounded_sA = sA.compose(make_shape(round_up(M, BLK_M), round_up(K, BLK_K))); Tensor rounded_sB = sB.compose(make_shape(round_up(N, BLK_N), round_up(K, BLK_K))); Tensor rounded_sC = sC.compose(make_shape(round_up(M, BLK_M), round_up(N, BLK_N))); // Partition the sA and sB tiles across the threads for the MMA Tensor tCsA = thr_mma.partition_A(rounded_sA); // (MMA,MMA_M,MMA_K) Tensor tCsB = thr_mma.partition_B(rounded_sB); // (MMA,MMA_N,MMA_K) Tensor tCsC = thr_mma.partition_C(rounded_sC); // (MMA,MMA_M,MMA_N) // Create register tensors for the MMA to operate on Tensor tCrA = thr_mma.make_fragment_A(tCsA); // (MMA,MMA_M,MMA_K) Tensor tCrB = thr_mma.make_fragment_B(tCsB); // (MMA,MMA_N,MMA_K) Tensor tCrC = thr_mma.make_fragment_C(tCsC); // (MMA,MMA_M,MMA_N) #if 0 if (thread0()) { print(" sA: "); print( sA); print("\n"); print(" sB: "); print( sB); print("\n"); print(" sC: "); print( sC); print("\n"); print("r_sA: "); print(rounded_sA); print("\n"); print("r_sB: "); print(rounded_sB); print("\n"); print("r_sC: "); print(rounded_sC); print("\n"); print(thr_mma); print("tCsA: "); print(tCsA); print("\n"); print("tCsB: "); print(tCsB); print("\n"); print("tCsC: "); print(tCsC); print("\n"); print("tCrA: "); print(tCrA); print("\n"); print("tCrB: "); print(tCrB); print("\n"); print("tCrC: "); print(tCrC); print("\n"); } #endif // // PREDICATION // // Create coordinate tensors for the problem Tensor cA = make_identity_tensor(shape(rounded_sA)); // (M,K) -> (m,k) Tensor cB = make_identity_tensor(shape(rounded_sB)); // (N,K) -> (n,k) // Repeat partitioning with thr_mma Tensor tCcA = thr_mma.partition_A(cA); // (MMA,MMA_M,MMA_K) -> (m,k) Tensor tCcB = thr_mma.partition_B(cB); // (MMA,MMA_N,MMA_K) -> (n,k) // Allocate the preds for MMA- and MMA_MN-modes Tensor tCpA = make_tensor<bool>(make_shape(size<0>(tCsA), size<1>(tCsA))); Tensor tCpB = make_tensor<bool>(make_shape(size<0>(tCsB), size<1>(tCsB))); // Populate the predicates on M and N CUTE_UNROLL for (int i = 0; i < size(tCpA); ++i) { tCpA(i) = elem_less(get<0>(tCcA(_,_,Int<0>{})(i)), shape<0>(sA)); } CUTE_UNROLL for (int i = 0; i < size(tCpB); ++i) { tCpB(i) = elem_less(get<0>(tCcB(_,_,Int<0>{})(i)), shape<0>(sB)); } #if 0 if 
(thread0()) { print(" cA: "); print( cA); print("\n"); print(" cB: "); print( cB); print("\n"); print("tCcA: "); print(tCcA); print("\n"); print("tCcB: "); print(tCcB); print("\n"); print_tensor(tCpA); print_tensor(tCpB); } #endif // // PREFETCH k_block = 0 // Condition the k-predication on (static) k_block == K_BLOCK_MAX-1, the last k_block // Assumes the MMA-tiling in K is trivial // constexpr int K_BLOCK_MAX = size<2>(tCrA); CUTE_UNROLL for (int m = 0; m < size<1>(tCrA); ++m) { // Copy MMA_M CUTE_UNROLL for (int i = 0; i < size<0>(tCrA); ++i) { // Copy MMA_I tCrA(i,m,0) = (tCpA(i,m) && (0 < K_BLOCK_MAX-1 || elem_less(get<1>(tCcA(i,m,0)), shape<1>(sA)))) ? sA_load_op(tCsA(i,m,0)) : TypeA{}; } } CUTE_UNROLL for (int n = 0; n < size<1>(tCrB); ++n) { // Copy MMA_N CUTE_UNROLL for (int i = 0; i < size<0>(tCrB); ++i) { // Copy MMA_I tCrB(i,n,0) = (tCpB(i,n) && (0 < K_BLOCK_MAX-1 || elem_less(get<1>(tCcB(i,n,0)), shape<1>(sB)))) ? sB_load_op(tCsB(i,n,0)) : TypeB{}; } } // // MAINLOOP // // Clear accumulators clear(tCrC); CUTE_UNROLL for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block) { if (k_block < K_BLOCK_MAX-1) // static-if not the last k_block { int k_next = k_block + 1; // Load k_next block // Condition the k-predication on (static) k_block == K_BLOCK_MAX-1, the last k_block // Assumes the MMA-tiling in K is trivial CUTE_UNROLL for (int m = 0; m < size<1>(tCrA); ++m) { // Copy MMA_M CUTE_UNROLL for (int i = 0; i < size<0>(tCrA); ++i) { // Copy MMA_I tCrA(i,m,k_next) = (tCpA(i,m) && (k_next < K_BLOCK_MAX-1 || elem_less(get<1>(tCcA(i,m,k_next)), shape<1>(sA)))) ? sA_load_op(tCsA(i,m,k_next)) : TypeA{}; } } CUTE_UNROLL for (int n = 0; n < size<1>(tCrB); ++n) { // Copy MMA_N CUTE_UNROLL for (int i = 0; i < size<0>(tCrB); ++i) { // Copy MMA_I tCrB(i,n,k_next) = (tCpB(i,n) && (k_next < K_BLOCK_MAX-1 || elem_less(get<1>(tCcB(i,n,k_next)), shape<1>(sB)))) ? sB_load_op(tCsB(i,n,k_next)) : TypeB{}; } } } // GEMM on k_block in registers gemm(thr_mma, tCrA(_,_,k_block), tCrB(_,_,k_block), tCrC); } // // Epilogue // // Create coordinate tensors for the problem Tensor cC = make_identity_tensor(shape(rounded_sC)); // (M,N) -> (m,n) // Repeat partitioning with thr_mma Tensor tCcC = thr_mma.partition_C(cC); // (MMA,MMA_M,MMA_N) -> (m,n) const bool isBetaZero = (beta == Beta{}); // Custom axpby_if for now CUTE_UNROLL for (int i = 0; i < size(tCrC); ++i) { if (elem_less(tCcC(i), shape(sC))) { tCsC(i) = sC_store_op(isBetaZero ? alpha * static_cast<TypeC>(tCrC(i)) : alpha * static_cast<TypeC>(tCrC(i)) + beta * static_cast<TypeC>(sC_load_op(tCsC(i)))); } } } // Slow fallback path template <class... 
Args, class Alpha, class TA, class ALayout, class TB, class BLayout, class Beta, class TC, class CLayout, class ALoadTransformOp, class BLoadTransformOp, class CLoadTransformOp, class CStoreTransformOp, __CUTE_REQUIRES(ALayout::rank == 2 && is_smem<TA>::value && BLayout::rank == 2 && is_smem<TB>::value && CLayout::rank == 2 && is_smem<TC>::value)> CUTE_HOST_DEVICE void cooperative_gemm_predication(uint32_t thread_idx, TiledMMA<Args...> const& tiled_mma, Alpha const& alpha, Tensor<TA, ALayout> sA, Tensor<TB, BLayout> sB, Beta const& beta, Tensor<TC, CLayout> sC, ALoadTransformOp const& sA_load_op, // transforms A values before use in GEMM BLoadTransformOp const& sB_load_op, // transforms B values before use in GEMM CLoadTransformOp const& sC_load_op, // transforms C values before use in GEMM CStoreTransformOp const& sC_store_op) // transforms results before they are stored to C { // ThrMMA auto thr_mma = tiled_mma.get_thread_slice(thread_idx); cooperative_gemm_predication(thr_mma, alpha, sA, sB, beta, sC, sA_load_op, sB_load_op, sC_load_op, sC_store_op); } // Unpredicated Cooperative GEMM template <class SmemCopyOpA, class SmemCopyOpB, class SmemCopyOpC, class... Args, class Alpha, class TA, class ALayout, class TB, class BLayout, class Beta, class TC, class CLayout, class ALoadTransformOp, class BLoadTransformOp, class CLoadTransformOp, class CStoreTransformOp, __CUTE_REQUIRES(ALayout::rank == 2 && is_smem<TA>::value && BLayout::rank == 2 && is_smem<TB>::value && CLayout::rank == 2 && is_smem<TC>::value)> CUTE_HOST_DEVICE void cooperative_gemm_no_predication(uint32_t thread_idx, TiledMMA<Args...> const& tiled_mma, Alpha const& alpha, Tensor<TA, ALayout> sA, Tensor<TB, BLayout> sB, Beta const& beta, Tensor<TC, CLayout> sC, ALoadTransformOp const& sA_load_op, // transforms A values before use in GEMM BLoadTransformOp const& sB_load_op, // transforms B values before use in GEMM CLoadTransformOp const& sC_load_op, // transforms C values before use in GEMM CStoreTransformOp const& sC_store_op) // transforms results before they are stored to C { using TypeA = typename TA::value_type; using TypeB = typename TB::value_type; using TypeC = typename TC::value_type; // ThrMMA auto thr_mma = tiled_mma.get_thread_slice(thread_idx); // // MMA Partitioning // Tensor tCsC = thr_mma.partition_C(sC); // Create register tensors for the MMA to operate on Tensor tCrA = thr_mma.partition_fragment_A(sA); // (MMA,MMA_M,MMA_K) Tensor tCrB = thr_mma.partition_fragment_B(sB); // (MMA,MMA_N,MMA_K) Tensor tCrC = thr_mma.make_fragment_C(tCsC); // (MMA,MMA_M,MMA_N) using CopyOpAType = SmemCopyOpA; using CopyOpBType = SmemCopyOpB; auto smem_tiled_copy_A = make_tiled_copy_A(Copy_Atom<CopyOpAType, TypeA>{}, thr_mma); auto smem_thr_copy_A = smem_tiled_copy_A.get_thread_slice(thread_idx); Tensor tCsA = smem_thr_copy_A.partition_S(sA); Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // CPY_M CUTE_STATIC_ASSERT_V(size<2>(tCsA) == size<2>(tCrA_copy_view)); // CPY_K auto smem_tiled_copy_B = make_tiled_copy_B(Copy_Atom<CopyOpBType, TypeB>{}, thr_mma); auto smem_thr_copy_B = smem_tiled_copy_B.get_thread_slice(thread_idx); Tensor tCsB = smem_thr_copy_B.partition_S(sB); Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB); CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // CPY_N CUTE_STATIC_ASSERT_V(size<2>(tCsB) == size<2>(tCrB_copy_view)); // CPY_K #if 0 if (thread0()) { print(" sA: "); print(sA); print("\n"); print(" sB: "); print(sB); 
print("\n"); print(" sC: "); print(sC); print("\n"); print(thr_mma); print("\n"); print("tCsC: "); print(tCsC); print("\n"); print("tCrA: "); print(tCrA); print("\n"); print("tCrB: "); print(tCrB); print("\n"); print("tCrC: "); print(tCrC); print("\n"); print(smem_thr_copy_A); print("\n"); print("tCsA: "); print(tCsA); print("\n"); print("tCrA_copy_view: "); print(tCrA_copy_view); print("\n"); print(smem_thr_copy_B); print("\n"); print("tCsB: "); print(tCsB); print("\n"); print("tCrB_copy_view: "); print(tCrB_copy_view); print("\n"); } #endif // // PREFETCH // copy(smem_tiled_copy_A, tCsA(_,_,Int<0>{}), tCrA_copy_view(_,_,Int<0>{})); copy(smem_tiled_copy_B, tCsB(_,_,Int<0>{}), tCrB_copy_view(_,_,Int<0>{})); // // MAINLOOP // // Clear accumulators clear(tCrC); constexpr int K_BLOCK_MAX = size<2>(tCrA); CUTE_UNROLL for (int k_block = 0; k_block < K_BLOCK_MAX; ++k_block) { // static-if load the next k_block. No k-predication required on these loads. if (k_block < K_BLOCK_MAX-1) { // Load the next k_block int k_next = k_block + 1; // statically unrolled copy(smem_tiled_copy_A, tCsA(_,_,k_next), tCrA_copy_view(_,_,k_next)); copy(smem_tiled_copy_B, tCsB(_,_,k_next), tCrB_copy_view(_,_,k_next)); } // Transform A and B, relying on the compiler to remove in case of identity ops cute::transform(tCrA(_,_,k_block), sA_load_op); cute::transform(tCrB(_,_,k_block), sB_load_op); // GEMM on k_block in registers gemm(thr_mma, tCrA(_,_,k_block), tCrB(_,_,k_block), tCrC); } // // Epilogue // auto isBetaZero = [&] () { if constexpr (is_complex<Beta>::value) { return beta.real() == Int<0>{} && beta.imag() == Int<0>{}; } else { return beta == Int<0>{}; } CUTE_GCC_UNREACHABLE; } (); using CopyOpCType = SmemCopyOpC; Tensor tCrD = thr_mma.make_fragment_C(tCsC); if(!isBetaZero) { copy(CopyOpCType{}, tCsC, tCrD); // Transform C on/after load cute::transform(tCrD, sC_load_op); } // C = alpha * (A * B) + beta * C axpby(alpha, tCrC, beta, tCrD); // Transform C before/on store cute::transform(tCrD, sC_store_op); copy(CopyOpCType{}, tCrD, tCsC); } } // end namespace detail template <class SmemCopyOpA, class SmemCopyOpB, class SmemCopyOpC, class... 
Args, class Alpha, class TA, class ALayout, class TB, class BLayout, class Beta, class TC, class CLayout, class ALoadTransformOp = cute::identity, class BLoadTransformOp = cute::identity, class CLoadTransformOp = cute::identity, class CStoreTransformOp = cute::identity, __CUTE_REQUIRES(ALayout::rank == 2 && is_smem<TA>::value && BLayout::rank == 2 && is_smem<TB>::value && CLayout::rank == 2 && is_smem<TC>::value)> CUTE_HOST_DEVICE void cooperative_gemm(uint32_t thread_idx, TiledMMA<Args...> const& tiled_mma, Alpha const& alpha, Tensor<TA, ALayout> sA, Tensor<TB, BLayout> sB, Beta const& beta, Tensor<TC, CLayout> sC, ALoadTransformOp const& sA_load_op = {}, // transforms A values before use in GEMM BLoadTransformOp const& sB_load_op = {}, // transforms B values before use in GEMM CLoadTransformOp const& sC_load_op = {}, // transforms C values before use in GEMM CStoreTransformOp const& sC_store_op = {}) // transforms results before they are stored to C { CUTE_STATIC_ASSERT_V(size<0>(sA) == size<0>(sC)); // AM == CM CUTE_STATIC_ASSERT_V(size<0>(sB) == size<1>(sC)); // BN == CN CUTE_STATIC_ASSERT_V(size<1>(sA) == size<1>(sB)); // AK == BK using TypeA = typename TA::value_type; using TypeB = typename TB::value_type; using TypeC = typename TC::value_type; static_assert(is_convertible_v<decay_t<invoke_result_t<ALoadTransformOp, TypeA>>, TypeA>, "ALoadTransformOp functor must accept value of type TA::value_type and return value convertible to type TA::value_type"); static_assert(is_convertible_v<decay_t<invoke_result_t<BLoadTransformOp, TypeB>>, TypeB>, "BLoadTransformOp functor must accept value of type TB::value_type and return value convertible to type TB::value_type"); static_assert(is_convertible_v<decay_t<invoke_result_t<CLoadTransformOp, TypeC>>, TypeC>, "CLoadTransformOp functor must accept value of type TC::value_type and return value convertible to type TC::value_type"); static_assert(is_convertible_v<decay_t<invoke_result_t<CStoreTransformOp, TypeC>>, TypeC>, "CStoreTransformOp functor must accept value of type TC::value_type and return value convertible to type TC::value_type"); static constexpr bool compat = weakly_compatible(tile_shape(TiledMMA<Args...>{}), make_shape(size<0>(sA), size<0>(sB), size<1>(sA))); if constexpr (compat) { detail::cooperative_gemm_no_predication<SmemCopyOpA, SmemCopyOpB, SmemCopyOpC>( thread_idx, tiled_mma, alpha, sA, sB, beta, sC, sA_load_op, sB_load_op, sC_load_op, sC_store_op ); } else { detail::cooperative_gemm_predication( thread_idx, tiled_mma, alpha, sA, sB, beta, sC, sA_load_op, sB_load_op, sC_load_op, sC_store_op ); } } template <class... 
Args, class Alpha, class TA, class ALayout, class TB, class BLayout, class Beta, class TC, class CLayout, class ALoadTransformOp = cute::identity, class BLoadTransformOp = cute::identity, class CLoadTransformOp = cute::identity, class CStoreTransformOp = cute::identity, __CUTE_REQUIRES(ALayout::rank == 2 && is_smem<TA>::value && BLayout::rank == 2 && is_smem<TB>::value && CLayout::rank == 2 && is_smem<TC>::value)> CUTE_HOST_DEVICE void cooperative_gemm(uint32_t thread_idx, TiledMMA<Args...> const& tiled_mma, Alpha const& alpha, Tensor<TA, ALayout> sA, Tensor<TB, BLayout> sB, Beta const& beta, Tensor<TC, CLayout> sC, ALoadTransformOp const& sA_load_op = {}, // transforms A values before use in GEMM BLoadTransformOp const& sB_load_op = {}, // transforms B values before use in GEMM CLoadTransformOp const& sC_load_op = {}, // transforms C values before use in GEMM CStoreTransformOp const& sC_store_op = {}) // transforms results before they are stored to C { using CopyOpA = AutoVectorizingCopyWithAssumedAlignment<sizeof_bits_v<typename TA::value_type>>; using CopyOpB = AutoVectorizingCopyWithAssumedAlignment<sizeof_bits_v<typename TB::value_type>>; using CopyOpC = AutoVectorizingCopyWithAssumedAlignment<sizeof_bits_v<typename TC::value_type>>; cooperative_gemm<CopyOpA, CopyOpB, CopyOpC>( thread_idx, tiled_mma, alpha, sA, sB, beta, sC, sA_load_op, sB_load_op, sC_load_op, sC_store_op ); } // Legacy overload of cute::gemm for backwards-compatibility template <class... Args, class Alpha, class TA, class ALayout, class TB, class BLayout, class Beta, class TC, class CLayout, class ALoadTransformOp = cute::identity, class BLoadTransformOp = cute::identity, class CLoadTransformOp = cute::identity, class CStoreTransformOp = cute::identity, __CUTE_REQUIRES(ALayout::rank == 2 && is_smem<TA>::value && BLayout::rank == 2 && is_smem<TB>::value && CLayout::rank == 2 && is_smem<TC>::value)> CUTE_HOST_DEVICE void gemm(ThrMMA<Args...> const& thr_mma, Alpha const& alpha, Tensor<TA, ALayout> sA, Tensor<TB, BLayout> sB, Beta const& beta, Tensor<TC, CLayout> sC, ALoadTransformOp const& sA_load_op = {}, // transforms A values before use in GEMM BLoadTransformOp const& sB_load_op = {}, // transforms B values before use in GEMM CLoadTransformOp const& sC_load_op = {}, // transforms C values before use in GEMM CStoreTransformOp const& sC_store_op = {}) // transforms results before they are stored to C { // Goes directly to the slow path to avoid getting thread_idx from thr_mma detail::cooperative_gemm_predication( thr_mma, alpha, sA, sB, beta, sC, sA_load_op, sB_load_op, sC_load_op, sC_store_op ); } } // end namespace cute
include/cute/algorithm/cooperative_gemm.hpp/0
{ "file_path": "include/cute/algorithm/cooperative_gemm.hpp", "repo_id": "include", "token_count": 10822 }
13
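The cooperative_gemm overloads above only show the call signatures, so a short usage sketch may help; it is hypothetical and not part of cooperative_gemm.hpp. It assumes `mma` is a cute::TiledMMA built elsewhere with make_tiled_mma, that the A (M,K), B (N,K) and C (M,N) tiles already reside in shared memory in compact column-major order, and that every thread of the CTA reaches the call with the same tensors and its own thread index.

#include <cute/tensor.hpp>
#include <cute/algorithm/cooperative_gemm.hpp>

template <class TiledMma>
__device__ void block_gemm_tile(TiledMma const& mma,
                                float const* smem_A,   // (M,K) = (128,8) tile
                                float const* smem_B,   // (N,K) = (128,8) tile
                                float*       smem_C)   // (M,N) = (128,128) tile
{
  using namespace cute;

  // Wrap the raw shared-memory pointers as rank-2 CuTe tensors (compact column-major layouts).
  Tensor sA = make_tensor(make_smem_ptr(smem_A), make_shape(Int<128>{}, Int<8>{}));
  Tensor sB = make_tensor(make_smem_ptr(smem_B), make_shape(Int<128>{}, Int<8>{}));
  Tensor sC = make_tensor(make_smem_ptr(smem_C), make_shape(Int<128>{}, Int<128>{}));

  // Whole-CTA GEMM on the smem tiles: sC(m,n) = 1.0f * sum_k sA(m,k)*sB(n,k) + 0.0f * sC(m,n).
  cooperative_gemm(threadIdx.x, mma, 1.0f, sA, sB, 0.0f, sC);
  __syncthreads();  // make the accumulated sC visible before any thread reads it
}

The load/store transform functors are left at their cute::identity defaults, matching the defaulted template parameters in the overloads above; whether the vectorized no-predication path or the predicated fallback runs is decided internally by the weakly_compatible check, not by the caller.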
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/arch/copy.hpp> #include <cute/arch/copy_sm90.hpp> namespace cute { //////////////////////////////////////////////////////////////////////////////////////////////////// /// TMA_LOAD : Initiates a TMA copy from global memory to shared memory //////////////////////////////////////////////////////////////////////////////////////////////////// struct SM90_TMA_LOAD_1D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& crd0) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.1d.shared::cluster.global.mbarrier::complete_tx::bytes" " [%0], [%1, {%3}], [%2];" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "r"(crd0) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } struct PREFETCH { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& crd0) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); asm volatile ( "cp.async.bulk.prefetch.tensor.1d.L2.global" " [%0, {%1}];" : : "l"(gmem_int_desc), "r"(crd0) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; }; struct SM90_TMA_LOAD_2D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& crd0, int32_t const& crd1) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); 
uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes" " [%0], [%1, {%3, %4}], [%2];" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "r"(crd0), "r"(crd1) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } struct PREFETCH { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& crd0, int32_t const& crd1) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); asm volatile ( "cp.async.bulk.prefetch.tensor.2d.L2.global" " [%0, {%1, %2}];" : : "l"(gmem_int_desc), "r"(crd0), "r"(crd1) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; }; struct SM90_TMA_LOAD_3D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes" " [%0], [%1, {%3, %4, %5}], [%2];" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "r"(crd0), "r"(crd1), "r"(crd2) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } struct PREFETCH { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); asm volatile ( "cp.async.bulk.prefetch.tensor.3d.L2.global" " [%0, {%1, %2, %3}];" : : "l"(gmem_int_desc), "r"(crd0), "r"(crd1), "r"(crd2) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; }; struct SM90_TMA_LOAD_4D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes" " [%0], [%1, {%3, %4, %5, %6}], [%2];" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } struct PREFETCH { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); asm volatile ( "cp.async.bulk.prefetch.tensor.4d.L2.global" " [%0, {%1, %2, %3, %4}];" : : "l"(gmem_int_desc), "r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; }; struct SM90_TMA_LOAD_5D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * 
smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes" " [%0], [%1, {%3, %4, %5, %6, %7}], [%2];" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3), "r"(crd4) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } struct PREFETCH { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); asm volatile ( "cp.async.bulk.prefetch.tensor.5d.L2.global" " [%0, {%1, %2, %3, %4, %5}];" : : "l"(gmem_int_desc), "r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3), "r"(crd4) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; }; struct SM90_TMA_LOAD { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& crd0) { return SM90_TMA_LOAD_1D::copy(desc_ptr, mbar_ptr, smem_ptr, crd0); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& crd0, int32_t const& crd1) { return SM90_TMA_LOAD_2D::copy(desc_ptr, mbar_ptr, smem_ptr, crd0, crd1); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2) { return SM90_TMA_LOAD_3D::copy(desc_ptr, mbar_ptr, smem_ptr, crd0, crd1, crd2); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3) { return SM90_TMA_LOAD_4D::copy(desc_ptr, mbar_ptr, smem_ptr, crd0, crd1, crd2, crd3); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4) { return SM90_TMA_LOAD_5D::copy(desc_ptr, mbar_ptr, smem_ptr, crd0, crd1, crd2, crd3, crd4); } struct PREFETCH { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& crd0) { return SM90_TMA_LOAD_1D::PREFETCH::copy(desc_ptr, crd0); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& crd0, int32_t const& crd1) { return SM90_TMA_LOAD_2D::PREFETCH::copy(desc_ptr, crd0, crd1); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2) { return SM90_TMA_LOAD_3D::PREFETCH::copy(desc_ptr, crd0, crd1, crd2); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3) { return SM90_TMA_LOAD_4D::PREFETCH::copy(desc_ptr, crd0, crd1, crd2, crd3); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4) { return SM90_TMA_LOAD_5D::PREFETCH::copy(desc_ptr, crd0, crd1, crd2, crd3, crd4); } }; }; 
//////////////////////////////////////////////////////////////////////////////////////////////////// /// TMA_LOAD im2col: Initiates a TMA copy, in im2col mode, from global memory to shared memory //////////////////////////////////////////////////////////////////////////////////////////////////// struct SM90_TMA_LOAD_IM2COL_3D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n, uint16_t const& offset_w) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); // Copy from global to shared::cluster. asm volatile ( "cp.async.bulk.tensor.3d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes" " [%0], [%1, {%3, %4, %5}], [%2], {%6};" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "r"(coord_c), "r"(coord_w), "r"(coord_n), "h"(offset_w) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } struct PREFETCH { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n, uint16_t const& offset_w) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); asm volatile ( "cp.async.bulk.prefetch.tensor.3d.L2.global.im2col" " [%0, {%1, %2, %3}], {%4};" : : "l"(gmem_int_desc), "r"(coord_c), "r"(coord_w), "r"(coord_n), "h"(offset_w) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; }; struct SM90_TMA_LOAD_IM2COL_4D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n, uint16_t const& offset_w, uint16_t const& offset_h) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); // Copy from global to shared::cluster. 
asm volatile ( "cp.async.bulk.tensor.4d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes" " [%0], [%1, {%3, %4, %5, %6}], [%2], {%7, %8};" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_n), "h"(offset_w), "h"(offset_h) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } struct PREFETCH { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n, uint16_t const& offset_w, uint16_t const& offset_h) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); asm volatile ( "cp.async.bulk.prefetch.tensor.4d.L2.global.im2col" " [%0, {%1, %2, %3, %4}], {%5, %6};" : : "l"(gmem_int_desc), "r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_n), "h"(offset_w), "h"(offset_h) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; }; struct SM90_TMA_LOAD_IM2COL_5D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n, uint16_t const& offset_w, uint16_t const& offset_h, uint16_t const& offset_d) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); // Copy from global to shared::cluster. asm volatile ( "cp.async.bulk.tensor.5d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes" " [%0], [%1, {%3, %4, %5, %6, %7}], [%2], {%8, %9, %10};" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_d), "r"(coord_n), "h"(offset_w), "h"(offset_h), "h"(offset_d) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } struct PREFETCH { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n, uint16_t const& offset_w, uint16_t const& offset_h, uint16_t const& offset_d) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); asm volatile ( "cp.async.bulk.prefetch.tensor.5d.L2.global.im2col" " [%0, {%1, %2, %3, %4, %5}], {%6, %7, %8};" : : "l"(gmem_int_desc), "r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_d), "r"(coord_n), "h"(offset_w), "h"(offset_h), "h"(offset_d) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; }; struct SM90_TMA_LOAD_IM2COL { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n, uint16_t const& offset_w) { return SM90_TMA_LOAD_IM2COL_3D::copy(desc_ptr, mbar_ptr, smem_ptr, coord_c, coord_w, coord_n, offset_w); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n, uint16_t const& offset_w, uint16_t const& offset_h) { return SM90_TMA_LOAD_IM2COL_4D::copy(desc_ptr, mbar_ptr, smem_ptr, coord_c, coord_w, coord_h, coord_n, offset_w, offset_h); } 
CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n, uint16_t const& offset_w, uint16_t const& offset_h, uint16_t const& offset_d) { return SM90_TMA_LOAD_IM2COL_5D::copy(desc_ptr, mbar_ptr, smem_ptr, coord_c, coord_w, coord_h, coord_d, coord_n, offset_w, offset_h, offset_d); } struct PREFETCH { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n, uint16_t const& offset_w) { return SM90_TMA_LOAD_IM2COL_3D::PREFETCH::copy(desc_ptr, coord_c, coord_w, coord_n, offset_w); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n, uint16_t const& offset_w, uint16_t const& offset_h) { return SM90_TMA_LOAD_IM2COL_4D::PREFETCH::copy(desc_ptr, coord_c, coord_w, coord_h, coord_n, offset_w, offset_h); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n, uint16_t const& offset_w, uint16_t const& offset_h, uint16_t const& offset_d) { return SM90_TMA_LOAD_IM2COL_5D::PREFETCH::copy(desc_ptr, coord_c, coord_w, coord_h, coord_d, coord_n, offset_w, offset_h, offset_d); } }; }; //////////////////////////////////////////////////////////////////////////////////////////////////// /// TMA_LOAD_MULTICAST: Initiates a TMA copy from global memory to shared memory //////////////////////////////////////////////////////////////////////////////////////////////////// struct SM90_TMA_LOAD_MULTICAST_1D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& crd0) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.1d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster" " [%0], [%1, {%4}], [%2], %3;" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "h"(multicast_mask), "r"(crd0) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_LOAD_MULTICAST_2D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& crd0, int32_t const& crd1) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster" " [%0], [%1, {%4, %5}], [%2], %3;" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "h"(multicast_mask), "r"(crd0), "r"(crd1) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_LOAD_MULTICAST_3D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = 
reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster" " [%0], [%1, {%4, %5, %6}], [%2], %3;" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "h"(multicast_mask), "r"(crd0), "r"(crd1), "r"(crd2) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_LOAD_MULTICAST_4D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster" " [%0], [%1, {%4, %5, %6, %7}], [%2], %3;" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "h"(multicast_mask), "r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_LOAD_MULTICAST_5D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster" " [%0], [%1, {%4, %5, %6, %7, %8}], [%2], %3;" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "h"(multicast_mask), "r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3), "r"(crd4) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_LOAD_MULTICAST { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& crd0) { return SM90_TMA_LOAD_MULTICAST_1D::copy(desc_ptr, mbar_ptr, multicast_mask, smem_ptr, crd0); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& crd0, int32_t const& crd1) { return SM90_TMA_LOAD_MULTICAST_2D::copy(desc_ptr, mbar_ptr, multicast_mask, smem_ptr, crd0, crd1); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2) { return SM90_TMA_LOAD_MULTICAST_3D::copy(desc_ptr, mbar_ptr, multicast_mask, smem_ptr, crd0, crd1, crd2); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3) { return SM90_TMA_LOAD_MULTICAST_4D::copy(desc_ptr, mbar_ptr, multicast_mask, smem_ptr, crd0, crd1, crd2, crd3); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& 
crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4) { return SM90_TMA_LOAD_MULTICAST_5D::copy(desc_ptr, mbar_ptr, multicast_mask, smem_ptr, crd0, crd1, crd2, crd3, crd4); } using PREFETCH = typename SM90_TMA_LOAD::PREFETCH; }; //////////////////////////////////////////////////////////////////////////////////////////////////// /// TMA_LOAD_MULTICAST im2col: Initiates a TMA copy, in im2col mode, from global memory to shared memory //////////////////////////////////////////////////////////////////////////////////////////////////// struct SM90_TMA_LOAD_IM2COL_MULTICAST_3D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n, uint16_t const& offset_w) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); // Copy from global to shared::cluster. asm volatile ( "cp.async.bulk.tensor.3d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster" " [%0], [%1, {%3, %4, %5}], [%2], {%6}, %7;" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "r"(coord_c), "r"(coord_w), "r"(coord_n), "h"(offset_w), "h"(multicast_mask) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_LOAD_IM2COL_MULTICAST_4D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n, uint16_t const& offset_w, uint16_t const& offset_h) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); // Copy from global to shared::cluster. asm volatile ( "cp.async.bulk.tensor.4d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster" " [%0], [%1, {%3, %4, %5, %6}], [%2], {%7, %8}, %9;" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_n), "h"(offset_w), "h"(offset_h), "h"(multicast_mask) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_LOAD_IM2COL_MULTICAST_5D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n, uint16_t const& offset_w, uint16_t const& offset_h, uint16_t const& offset_d) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); // Copy from global to shared::cluster. 
asm volatile ( "cp.async.bulk.tensor.5d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster" " [%0], [%1, {%3, %4, %5, %6, %7}], [%2], {%8, %9, %10}, %11;" : : "r"(smem_int_ptr), "l"(gmem_int_desc), "r"(smem_int_mbar), "r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_d), "r"(coord_n), "h"(offset_w), "h"(offset_h), "h"(offset_d), "h"(multicast_mask) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_LOAD_IM2COL_MULTICAST { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n, uint16_t const& offset_w) { return SM90_TMA_LOAD_IM2COL_MULTICAST_3D::copy(desc_ptr, mbar_ptr, multicast_mask, smem_ptr, coord_c, coord_w, coord_n, offset_w); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n, uint16_t const& offset_w, uint16_t const& offset_h) { return SM90_TMA_LOAD_IM2COL_MULTICAST_4D::copy(desc_ptr, mbar_ptr, multicast_mask, smem_ptr, coord_c, coord_w, coord_h, coord_n, offset_w, offset_h); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, uint64_t* mbar_ptr, uint16_t multicast_mask, void * smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n, uint16_t const& offset_w, uint16_t const& offset_h, uint16_t const& offset_d) { return SM90_TMA_LOAD_IM2COL_MULTICAST_5D::copy(desc_ptr, mbar_ptr, multicast_mask, smem_ptr, coord_c, coord_w, coord_h, coord_d, coord_n, offset_w, offset_h, offset_d); } using PREFETCH = typename SM90_TMA_LOAD_IM2COL::PREFETCH; }; //////////////////////////////////////////////////////////////////////////////////////////////////// /// TMA_STORE : Initiates a TMA copy from shared memory to global memory //////////////////////////////////////////////////////////////////////////////////////////////////// struct SM90_TMA_STORE_1D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& crd0) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.1d.global.shared::cta.bulk_group [%0, {%2}], [%1];" : : "l"(gmem_int_desc), "r"(smem_int_ptr), "r"(crd0) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_STORE_2D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& crd0, int32_t const& crd1) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.2d.global.shared::cta.bulk_group [%0, {%2, %3}], [%1];" : : "l"(gmem_int_desc), "r"(smem_int_ptr), "r"(crd0), "r"(crd1) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_STORE_3D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_ptr = 
cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.3d.global.shared::cta.bulk_group [%0, {%2, %3, %4}], [%1];" : : "l"(gmem_int_desc), "r"(smem_int_ptr), "r"(crd0), "r"(crd1), "r"(crd2) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_STORE_4D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.4d.global.shared::cta.bulk_group [%0, {%2, %3, %4, %5}], [%1];" : : "l"(gmem_int_desc), "r"(smem_int_ptr), "r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_STORE_5D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.5d.global.shared::cta.bulk_group [%0, {%2, %3, %4, %5, %6}], [%1];" : : "l"(gmem_int_desc), "r"(smem_int_ptr), "r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3), "r"(crd4) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_STORE { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& crd0) { return SM90_TMA_STORE_1D::copy(desc_ptr, smem_ptr, crd0); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& crd0, int32_t const& crd1) { return SM90_TMA_STORE_2D::copy(desc_ptr, smem_ptr, crd0, crd1); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2) { return SM90_TMA_STORE_3D::copy(desc_ptr, smem_ptr, crd0, crd1, crd2); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3) { return SM90_TMA_STORE_4D::copy(desc_ptr, smem_ptr, crd0, crd1, crd2, crd3); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4) { return SM90_TMA_STORE_5D::copy(desc_ptr, smem_ptr, crd0, crd1, crd2, crd3, crd4); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// /// TMA_STORE im2col: Initiates a TMA copy, in im2col mode, from shared memory to global memory //////////////////////////////////////////////////////////////////////////////////////////////////// struct SM90_TMA_STORE_IM2COL_3D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.3d.global.shared::cta.im2col_no_offs.bulk_group" " [%0, {%2, %3, %4}], [%1];" : : "l"(gmem_int_desc), 
"r"(smem_int_ptr), "r"(coord_c), "r"(coord_w), "r"(coord_n) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_STORE_IM2COL_4D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.4d.global.shared::cta.im2col_no_offs.bulk_group" " [%0, {%2, %3, %4, %5}], [%1];" : : "l"(gmem_int_desc), "r"(smem_int_ptr), "r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_n) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_STORE_IM2COL_5D { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.async.bulk.tensor.5d.global.shared::cta.im2col_no_offs.bulk_group" " [%0, {%2, %3, %4, %5, %6}], [%1];" : : "l"(gmem_int_desc), "r"(smem_int_ptr), "r"(coord_c), "r"(coord_w), "r"(coord_h), "r"(coord_d), "r"(coord_n) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_STORE_IM2COL { CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_n) { return SM90_TMA_STORE_IM2COL_3D::copy(desc_ptr, smem_ptr, coord_c, coord_w, coord_n); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_n) { return SM90_TMA_STORE_IM2COL_4D::copy(desc_ptr, smem_ptr, coord_c, coord_w, coord_h, coord_n); } CUTE_HOST_DEVICE static void copy(void const* desc_ptr, void const* smem_ptr, int32_t const& coord_c, int32_t const& coord_w, int32_t const& coord_h, int32_t const& coord_d, int32_t const& coord_n) { return SM90_TMA_STORE_IM2COL_5D::copy(desc_ptr, smem_ptr, coord_c, coord_w, coord_h, coord_d, coord_n); } }; // Fence for smem stores for subsequent TMA_STORE CUTE_HOST_DEVICE static void tma_store_fence() { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) asm volatile ("fence.proxy.async.shared::cta;"); #elif defined(__CUDA_ARCH__) CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } // Indicate arrival of warp issuing TMA_STORE CUTE_HOST_DEVICE static void tma_store_arrive() { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) asm volatile("cp.async.bulk.commit_group;"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } // Wait until at most Count committed TMA_STOREs are pending and all prior commits are complete template <int Count> CUTE_HOST_DEVICE static void tma_store_wait() { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) asm volatile( "cp.async.bulk.wait_group.read %0;" : : "n"(Count) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } //////////////////////////////////////////////////////////////////////////////////////////////////// /// 
TMA_REDUCE_ADD : Initiates a TMA reduce-add from shared memory to global memory //////////////////////////////////////////////////////////////////////////////////////////////////// struct SM90_TMA_REDUCE_ADD_1D { CUTE_HOST_DEVICE static void copy(void const* const desc_ptr, void const* const smem_ptr, int32_t const& crd0) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.reduce.async.bulk.tensor.1d.global.shared::cta.add.bulk_group [%0, {%2}], [%1];" : : "l"(gmem_int_desc), "r"(smem_int_ptr), "r"(crd0) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_REDUCE_ADD_2D { CUTE_HOST_DEVICE static void copy(void const* const desc_ptr, void const* const smem_ptr, int32_t const& crd0, int32_t const& crd1) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.reduce.async.bulk.tensor.2d.global.shared::cta.add.bulk_group [%0, {%2, %3}], [%1];" : : "l"(gmem_int_desc), "r"(smem_int_ptr), "r"(crd0), "r"(crd1) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_REDUCE_ADD_3D { CUTE_HOST_DEVICE static void copy(void const* const desc_ptr, void const* const smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.reduce.async.bulk.tensor.3d.global.shared::cta.add.bulk_group [%0, {%2, %3, %4}], [%1];" : : "l"(gmem_int_desc), "r"(smem_int_ptr), "r"(crd0), "r"(crd1), "r"(crd2) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_REDUCE_ADD_4D { CUTE_HOST_DEVICE static void copy(void const* const desc_ptr, void const* const smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.reduce.async.bulk.tensor.4d.global.shared::cta.add.bulk_group [%0, {%2, %3, %4, %5}], [%1];" : : "l"(gmem_int_desc), "r"(smem_int_ptr), "r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_REDUCE_ADD_5D { CUTE_HOST_DEVICE static void copy(void const* const desc_ptr, void const* const smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile ( "cp.reduce.async.bulk.tensor.5d.global.shared::cta.add.bulk_group [%0, {%2, %3, %4, %5, %6}], [%1];" : : "l"(gmem_int_desc), "r"(smem_int_ptr), "r"(crd0), "r"(crd1), "r"(crd2), "r"(crd3), "r"(crd4) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use tma without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_TMA_REDUCE_ADD { CUTE_HOST_DEVICE static void copy(void const* const desc_ptr, void const* const smem_ptr, int32_t const& crd0) { return 
SM90_TMA_REDUCE_ADD_1D::copy(desc_ptr, smem_ptr, crd0); } CUTE_HOST_DEVICE static void copy(void const* const desc_ptr, void const* const smem_ptr, int32_t const& crd0, int32_t const& crd1) { return SM90_TMA_REDUCE_ADD_2D::copy(desc_ptr, smem_ptr, crd0, crd1); } CUTE_HOST_DEVICE static void copy(void const* const desc_ptr, void const* const smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2) { return SM90_TMA_REDUCE_ADD_3D::copy(desc_ptr, smem_ptr, crd0, crd1, crd2); } CUTE_HOST_DEVICE static void copy(void const* const desc_ptr, void const* const smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3) { return SM90_TMA_REDUCE_ADD_4D::copy(desc_ptr, smem_ptr, crd0, crd1, crd2, crd3); } CUTE_HOST_DEVICE static void copy(void const* const desc_ptr, void const* const smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2, int32_t const& crd3, int32_t const& crd4) { return SM90_TMA_REDUCE_ADD_5D::copy(desc_ptr, smem_ptr, crd0, crd1, crd2, crd3, crd4); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// /// BULK_COPY : Copy a bulk of memory between shared memory and global memory //////////////////////////////////////////////////////////////////////////////////////////////////// struct SM90_BULK_COPY_G2S { CUTE_HOST_DEVICE static void copy(void const* gmem_ptr, uint64_t* mbar_ptr, void * smem_ptr, int32_t load_bytes) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint32_t smem_int_mbar = cast_smem_ptr_to_uint(mbar_ptr); uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile("cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%0], [%1], %2, [%3];\n" : : "r"(smem_int_ptr), "l"(gmem_ptr), "r"(load_bytes), "r"(smem_int_mbar) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use BULK_COPY without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } struct PREFETCH { CUTE_HOST_DEVICE static void copy(void const* gmem_ptr, int32_t load_bytes) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) asm volatile("cp.async.bulk.prefetch.L2.global [%0], %1;\n" : : "l"(gmem_ptr), "r"(load_bytes) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use BULK_COPY without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; }; struct SM90_BULK_COPY_S2G { CUTE_HOST_DEVICE static void copy(void const* smem_ptr, void * gmem_ptr, int32_t store_bytes) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint32_t smem_int_ptr = cast_smem_ptr_to_uint(smem_ptr); asm volatile("cp.async.bulk.global.shared::cta.bulk_group [%0], [%1], %2;\n" : : "l"(gmem_ptr), "r"(smem_int_ptr), "r"(store_bytes) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use BULK_COPY without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } }; struct SM90_BULK_COPY_AUTO {}; //////////////////////////////////////////////////////////////////////////////////////////////////// } // end namespace cute
include/cute/arch/copy_sm90_tma.hpp/0
{ "file_path": "include/cute/arch/copy_sm90_tma.hpp", "repo_id": "include", "token_count": 24438 }
14
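The header above defines the raw PTX wrappers but not the order in which a kernel is expected to use the store-side helpers, so here is a hedged ordering sketch; the surrounding kernel fragment, descriptor pointer and tile coordinates are assumptions, while SM90_TMA_STORE, tma_store_fence, tma_store_arrive and tma_store_wait are the symbols defined above.

#include <cute/arch/copy_sm90_tma.hpp>

__device__ void store_smem_tile_2d(void const* tma_desc,   // pointer to the CUtensorMap describing the destination
                                   void const* smem_tile,  // CTA's staged output tile in shared memory
                                   int32_t crd0, int32_t crd1)
{
  // 1. Every thread finishes its smem writes, fences the generic->async proxy so
  //    the TMA unit can observe them, and rendezvouses with the rest of the CTA.
  cute::tma_store_fence();
  __syncthreads();

  // 2. A single thread issues the bulk tensor store (2-D overload), commits the
  //    bulk-async group, and blocks until no committed store still reads the tile.
  if (threadIdx.x == 0) {
    cute::SM90_TMA_STORE::copy(tma_desc, smem_tile, crd0, crd1);
    cute::tma_store_arrive();
    cute::tma_store_wait<0>();
  }

  // 3. After this barrier the shared-memory tile may be reused by any thread.
  __syncthreads();
}

The load-side structs (SM90_TMA_LOAD*, SM90_TMA_LOAD_MULTICAST*) follow the complementary pattern: completion is signaled through the mbarrier passed as mbar_ptr rather than through a bulk-group wait.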
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once /*! \file \brief im2col make_tma_copy */ #include "cute/arch/copy_sm90.hpp" #include "cute/arch/copy_sm90_desc.hpp" #include "cute/tensor.hpp" #include "cute/algorithm/prefetch.hpp" namespace cute { // Utility for unpacking TMA_LOAD_IM2COL arguments into a CopyOp template <class CopyOp> struct TMA_LOAD_IM2COL_Unpack { /// Copy from src to dst. /// /// @param traits Copy traits created with a TMA descriptor that /// correctly matches the input tensor and other convolution /// parameters. /// /// @param src Tile of the im2col-transformed coordinate tensor /// (result of get_tma_tensor), representing the global-memory /// tensor from which to load. /// /// @param dst Shared memory tile, into which to load. template <class... 
Args, class TS, class SLayout, class TD, class DLayout> CUTE_HOST_DEVICE friend constexpr void copy_unpack(Copy_Traits<CopyOp, Args...> const& traits, Tensor<TS,SLayout> const& src, // tile of the transformed global activation (A) tensor Tensor<TD,DLayout> & dst) // shared memory tile { auto src_coord_offset = src(Int<0>{}); auto src_coord_cwhdn_offset_srt = flatten(src_coord_offset); // Interpret the TMA IM2COL coordinate as (c, ([w,h,d]), n, ([s,r,t])) CUTE_STATIC_ASSERT_V(rank(src_coord_offset) == _4{}); CUTE_STATIC_ASSERT_V(rank<1>(src_coord_offset) == rank<3>(src_coord_offset)); if constexpr (detail::is_prefetch<CopyOp>) { return detail::explode_tuple(detail::CallCOPY<CopyOp>{}, traits.opargs_, tuple_seq<decltype(traits.opargs_)>{}, src_coord_cwhdn_offset_srt, tuple_seq<decltype(src_coord_cwhdn_offset_srt)>{}); } else { static_assert(is_smem<TD>::value, "SM90_TMA_LOAD_IM2COL requires the destination be shared memory."); void* dst_ptr = cute::raw_pointer_cast(dst.data()); return detail::explode_tuple(detail::CallCOPY<CopyOp>{}, traits.opargs_, tuple_seq<decltype(traits.opargs_)>{}, make_tuple(dst_ptr), seq<0>{}, src_coord_cwhdn_offset_srt, tuple_seq<decltype(src_coord_cwhdn_offset_srt)>{}); } } }; // Copy_Traits for SM90 im2col TMA load comes in two layers. // // 1. Copy_Traits<SM90_TMA_LOAD_IM2COL> // 2. Copy_Traits<SM90_TMA_LOAD_IM2COL_OP> // // Copy_Traits<SM90_TMA_LOAD_IM2COL> // is the "outer" layer. It has a TMA descriptor, // but no barrier ("tma_mbar"), so it's "nonexecutable." // One calls its "with" member function with a barrier, // to get an executable "inner"-layer // Copy_Traits<SM90_TMA_LOAD_IM2COL_OP> object. // That object's "copy_unpack" member function // actually invokes im2col TMA load. struct SM90_TMA_LOAD_IM2COL_OP : SM90_TMA_LOAD_IM2COL {}; /// @brief Non-executable specialization of Copy_Traits for SM90 /// im2col TMA load, with TMA descriptor but no barrier. /// /// Use `.with(memory_barrier)` to construct an executable version. template <class NumBitsPerTMA, class TMATensor> struct Copy_Traits<SM90_TMA_LOAD_IM2COL, NumBitsPerTMA, TMATensor> { using ThrID = Layout<_1>; // Map from (src-thr,src-val) to bit using SrcLayout = Layout<Shape<_1, NumBitsPerTMA>>; // Map from (dst-thr,dst-val) to bit using DstLayout = Layout<Shape<_1, NumBitsPerTMA>>; // Reference map from (thr,val) to bit using RefLayout = SrcLayout; Im2ColTmaDescriptor tma_desc_; TMATensor tma_tensor_; CUTE_HOST_DEVICE constexpr Im2ColTmaDescriptor const* get_tma_descriptor() const { return &tma_desc_; } template <class GShape> CUTE_HOST_DEVICE constexpr TMATensor const get_tma_tensor(GShape const&) const { return tma_tensor_; } /// @brief Get an executable specialization. /// /// Copy_Traits specializations with SM90_TMA_LOAD_IM2COL are not /// directly executable. Instead, call this "with" member function /// to get an executable specialization. "Executable" means that /// @c copy_unpack works. /// /// @param tma_mbar Memory barrier for synchronization /// /// @param multicast_mask Multicast mask (unused; only exists /// for interface compatibility with the actual multicast Copy_Traits) /// /// @return Executable specialization of @c Copy_Traits CUTE_HOST_DEVICE constexpr Copy_Traits<SM90_TMA_LOAD_IM2COL_OP, NumBitsPerTMA> with(uint64_t& tma_mbar, [[maybe_unused]] uint16_t const& multicast_mask = 0) const { return {{}, {&tma_desc_, &tma_mbar}}; } // Copy_Traits specializations with SM90_TMA_LOAD_IM2COL // are not directly executable. Instead, call .with // to get an executable specialization. 
template <class TS, class SLayout, class TD, class DLayout> CUTE_HOST_DEVICE friend constexpr void copy_unpack(Copy_Traits const& traits, Tensor<TS,SLayout> const& src, Tensor<TD,DLayout> & dst) = delete; }; /// @brief Executable specialization of Copy_Traits for SM90 im2col /// TMA load, with TMA descriptor and barrier. template <class NumBitsPerTMA> struct Copy_Traits<SM90_TMA_LOAD_IM2COL_OP, NumBitsPerTMA> : TMA_LOAD_IM2COL_Unpack<SM90_TMA_LOAD_IM2COL_OP> { using ThrID = Layout<_1>; // Map from (src-thr,src-val) to bit using SrcLayout = Layout<Shape<_1, NumBitsPerTMA>>; // Map from (dst-thr,dst-val) to bit using DstLayout = Layout<Shape<_1, NumBitsPerTMA>>; // Reference map from (thr,val) to bit using RefLayout = SrcLayout; // SM90_TMA_LOAD_IM2COL arguments tuple< Im2ColTmaDescriptor const*, uint64_t* // smem mbarrier > const opargs_; }; template <class NumBitsPerTMA, class... Args> struct Copy_Traits<SM90_TMA_LOAD_IM2COL::PREFETCH, NumBitsPerTMA, Args...> : TMA_LOAD_IM2COL_Unpack<SM90_TMA_LOAD_IM2COL::PREFETCH> { using ThrID = Layout<_1>; // Map from (src-thr,src-val) to bit using SrcLayout = Layout<Shape<_1, NumBitsPerTMA>>; // Map from (dst-thr,dst-val) to bit using DstLayout = Layout<Shape<_1, NumBitsPerTMA>>; // Reference map from (thr,val) to bit using RefLayout = SrcLayout; // SM90_TMA_LOAD_IM2COL::PREFETCH arguments tuple<Im2ColTmaDescriptor const*> const opargs_; CUTE_HOST_DEVICE Copy_Traits(Copy_Traits<SM90_TMA_LOAD_IM2COL, NumBitsPerTMA, Args...> const& traits) : opargs_({&traits.tma_desc_}) {} }; ////////////////////////////////////////////////////////////////////////////// ///////////////////////////// TMA_LOAD_MULTICAST ///////////////////////////// ////////////////////////////////////////////////////////////////////////////// struct SM90_TMA_LOAD_IM2COL_MULTICAST_OP : SM90_TMA_LOAD_IM2COL_MULTICAST {}; /// @brief Non-executable specialization of Copy_Traits for SM90 /// im2col TMA load, with TMA descriptor but no barrier or multicast /// mask. /// /// Use `.with(memory_barrier)` to construct an executable version. template <class NumBitsPerTMA, class TMATensor> struct Copy_Traits<SM90_TMA_LOAD_IM2COL_MULTICAST, NumBitsPerTMA, TMATensor> { using ThrID = Layout<_1>; // Map from (src-thr,src-val) to bit using SrcLayout = Layout<Shape<_1, NumBitsPerTMA>>; // Map from (dst-thr,dst-val) to bit using DstLayout = Layout<Shape<_1, NumBitsPerTMA>>; // Reference map from (thr,val) to bit using RefLayout = SrcLayout; Im2ColTmaDescriptor tma_desc_; TMATensor tma_tensor_; CUTE_HOST_DEVICE constexpr Im2ColTmaDescriptor const* get_tma_descriptor() const { return &tma_desc_; } template <class GShape> CUTE_HOST_DEVICE constexpr TMATensor const get_tma_tensor(GShape const&) const { return tma_tensor_; } /// @brief Get an executable specialization. /// /// Copy_Traits specializations with SM90_TMA_LOAD_IM2COL_MULTICAST /// are not directly executable. Instead, call this "with" member /// function to get an executable specialization. "Executable" /// means that @c copy_unpack works. /// /// @param tma_mbar Memory barrier for synchronization /// /// @param multicast_mask Multicast mask (defaults to a single CTA) /// /// @return Executable specialization of @c Copy_Traits CUTE_HOST_DEVICE constexpr Copy_Traits<SM90_TMA_LOAD_IM2COL_MULTICAST_OP, NumBitsPerTMA> with(uint64_t& tma_mbar, uint16_t const& multicast_mask) const { return {{}, {&tma_desc_, &tma_mbar, multicast_mask}}; } // Copy_Traits specializations with SM90_TMA_LOAD_IM2COL_MULTICAST // are not directly executable. 
Instead, call .with to get an // executable specialization. template <class TS, class SLayout, class TD, class DLayout> CUTE_HOST_DEVICE friend constexpr void copy_unpack(Copy_Traits const& traits, Tensor<TS,SLayout> const& src, Tensor<TD,DLayout> & dst) = delete; }; /// @brief Executable specialization of Copy_Traits for SM90 multicast /// im2col TMA load, with TMA descriptor, barrier, and multicast mask. template <class NumBitsPerTMA> struct Copy_Traits<SM90_TMA_LOAD_IM2COL_MULTICAST_OP, NumBitsPerTMA> : TMA_LOAD_IM2COL_Unpack<SM90_TMA_LOAD_IM2COL_MULTICAST_OP> { using ThrID = Layout<_1>; // Map from (src-thr,src-val) to bit. using SrcLayout = Layout<Shape<_1, NumBitsPerTMA>>; // Map from (dst-thr,dst-val) to bit using DstLayout = Layout<Shape<_1, NumBitsPerTMA>>; // Reference map from (thr,val) to bit using RefLayout = SrcLayout; // SM90_TMA_LOAD_IM2COL_MULTICAST arguments tuple< Im2ColTmaDescriptor const*, uint64_t*, // smem mbarrier uint16_t // multicast mask > const opargs_; }; ////////////////////////////////////////////////////////////////////////////// ///////////////////////////// TMA_STORE IM2COL//////////////////////////////// ////////////////////////////////////////////////////////////////////////////// // The executable SM90_TMA_STORE_IM2COL with tma_desc template <class NumBitsPerTMA, class TMATensor> struct Copy_Traits<SM90_TMA_STORE_IM2COL, NumBitsPerTMA, TMATensor> { using ThrID = Layout<_1>; // Map from (src-thr,src-val) to bit using SrcLayout = Layout<Shape<_1,NumBitsPerTMA>>; // Map from (dst-thr,dst-val) to bit using DstLayout = Layout<Shape<_1,NumBitsPerTMA>>; // Reference map from (thr,val) to bit using RefLayout = SrcLayout; // SM90_TMA_STORE_IM2COL arguments Im2ColTmaDescriptor tma_desc_; TMATensor tma_tensor_; // Return TmaDescriptor/TensorMap CUTE_HOST_DEVICE constexpr Im2ColTmaDescriptor const* get_tma_descriptor() const { return &tma_desc_; } template <class GShape> CUTE_HOST_DEVICE constexpr TMATensor const get_tma_tensor(GShape const&) const { return tma_tensor_; } // This is the copy_unpack dispatch for this Copy_Traits // Src needs to be a smem tensor // Dst needs to be a gmem tensor with TmaCoordIterator .data() template <class TS, class SLayout, class TD, class DLayout> CUTE_HOST_DEVICE friend constexpr void copy_unpack(Copy_Traits const& traits, Tensor<TS,SLayout> const& src, Tensor<TD,DLayout> & dst) { static_assert(is_smem<TS>::value, "Expected smem src for SM90_TMA_STORE_IM2COL"); void const* const desc_ptr = &(traits.tma_desc_); void const* const src_ptr = cute::raw_pointer_cast(src.data()); auto dst_coord = flatten(take<0,3>(dst(Int<0>{}))); return detail::explode_tuple(detail::CallCOPY<SM90_TMA_STORE_IM2COL>{}, make_tuple(desc_ptr, src_ptr), seq<0,1>{}, dst_coord, tuple_seq<decltype(dst_coord)>{}); } }; namespace detail { /// @brief Creates a TMA descriptor for im2col TMA load. /// /// @param tensor_cwhdn Global activation tensor (A matrix of Fprop). /// This is the original (not im2col-transformed) tensor in global /// memory. /// /// @param slayout Rank 2 (M,K) shared memory layout of the activation /// tensor. Here, K is "GEMM K," not the filter tensor's mode of /// the same name. ////// /// @param traversal_stride Traversal strides convolution parameter ////// /// Each of padding_shape, traversal_stride, and dilation_shape is a /// tuple whose size is the number of spatial modes (e.g., 3 for a 5-D /// convolution). 
/// /// @return TMA descriptor for im2col TMA load template <class EngineA, class LayoutA, class SmemSwizzle, class TMALayout, class LowerCornerStride, class UpperCornerStride, class LowerPaddingStride, class UpperPaddingStride, class TraversalStride, class LowerSRTStride, class DilationStride> CUTE_HOST auto make_im2col_tma_copy_desc( Tensor<EngineA, LayoutA> const& tensor_cwhdn, // (C,W,H,D,N) uint32_t range_c, // TILE_C uint32_t range_whdn, // TILE_WHDN SmemSwizzle const& smem_swizzle, // Swizzle TMALayout const& tma_layout_vt, // TMA layout LowerCornerStride const& lower_corner_whd, // WHD offset of the "base pointer" UpperCornerStride const& upper_corner_whd, // WHD upper corner LowerPaddingStride const& lower_padding_whd, // WHD lower padding UpperPaddingStride const& upper_padding_whd, // WHD upper padding TraversalStride const& stride_whd, // WHD traversal stride LowerSRTStride const& lower_srt, // SRT offset of the "base pointer" DilationStride const& stride_srt) // SRT stride - dilation { static_assert(is_gmem<EngineA>::value, "Tensor must point to GPU global memory."); using value_type = typename EngineA::value_type; constexpr uint32_t num_total_modes = LayoutA::rank; constexpr int num_spatial_modes = num_total_modes - 2; // Gmem starting address void* gmem_address = (void*) raw_pointer_cast(tensor_cwhdn.data()); // Gmem extents are just the tensor shape cute::array<uint64_t, 5> gmem_prob_shape = {1,1,1,1,1}; for_each(make_seq<num_total_modes>{}, [&](auto i) { gmem_prob_shape[i] = static_cast<uint64_t>(shape<i>(tensor_cwhdn)); }); // Gmem strides are byte strides of the activation tensor in CWHDN order cute::array<uint64_t, 5> gmem_prob_stride = {0,0,0,0,0}; for_each(make_seq<num_total_modes>{}, [&](auto i) { gmem_prob_stride[i] = sizeof(value_type) * stride<i>(tensor_cwhdn); }); // Traversal strides are a function of the dilation shape // corresponding to spatial (WHD) modes. cute::array<uint32_t, 5> tma_traversal_strides = {1,1,1,1,1}; for_each(make_seq<num_spatial_modes>{}, [&](auto i) { tma_traversal_strides[i+1] = static_cast<uint32_t>(get<i>(stride_whd)); }); cute::array<int32_t, num_spatial_modes> tma_lower_corner{}; for_each(make_seq<num_spatial_modes>{}, [&](auto i) { tma_lower_corner[i] = static_cast<int32_t>(get<i>(lower_corner_whd)); }); cute::array<int32_t, num_spatial_modes> tma_upper_corner{}; for_each(make_seq<num_spatial_modes>{}, [&](auto i) { tma_upper_corner[i] = static_cast<int32_t>(get<i>(upper_corner_whd)); }); Im2ColTmaDescriptor tma_desc; #if (__CUDACC_VER_MAJOR__ >= 12) CUtensorMapDataType tma_format = TMA::to_CUtensorMapDataType<value_type>(); CUtensorMapInterleave tma_interleave = CU_TENSOR_MAP_INTERLEAVE_NONE; CUtensorMapL2promotion tma_l2Promotion = CU_TENSOR_MAP_L2_PROMOTION_NONE; CUtensorMapFloatOOBfill tma_oob_fill = CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE; CUtensorMapSwizzle tma_swizzle = TMA::to_CUtensorMapSwizzle(detail::get_tma_swizzle_bits(smem_swizzle)); CUresult encode_result = cuTensorMapEncodeIm2col( &tma_desc, tma_format, num_total_modes, gmem_address, gmem_prob_shape.data(), gmem_prob_stride.data() + 1, // gmem_prob_stride[0] implicitly sizeof(value_type) tma_lower_corner.data(), tma_upper_corner.data(), range_c, range_whdn, tma_traversal_strides.data(), tma_interleave, tma_swizzle, tma_l2Promotion, tma_oob_fill); // The extra asserts help indicate the error's cause. 
assert(encode_result != CUDA_ERROR_DEINITIALIZED); assert(encode_result != CUDA_ERROR_NOT_INITIALIZED); assert(encode_result != CUDA_ERROR_INVALID_CONTEXT); assert(encode_result != CUDA_ERROR_INVALID_VALUE); assert(encode_result == CUDA_SUCCESS); #endif // (__CUDACC_VER_MAJOR__ >= 12) // // Calculate gemm shapes and linearized shapes based on tma layout tiling. // // Compute [w, h, d, n] // q/p/z = (w/h/d + (upper_corner_whd - lower_corner_whd - 1)) / stride_whd + 1 auto gemm_mn_ = cute::transform(cute::make_seq<num_spatial_modes>{}, [&](auto i) { return (shape<i+1>(tensor_cwhdn) + get<i>(upper_corner_whd) - get<i>(lower_corner_whd) - Int<1>{}) / get<i>(stride_whd) + Int<1>{}; }); auto gemm_mn = append(gemm_mn_, shape<num_spatial_modes+1>(tensor_cwhdn)); // Compute [c, s, r, t] // fprop/wgrad, s/r/t = 1 + (upper_padding_whd - upper_corner_whd) / stride_srt // wgrad, s/r/t = 1 + (lower_padding_whd - lower_corner_whd) / stride_srt auto gemm_k_ = cute::transform(cute::make_seq<num_spatial_modes>{}, [&](auto i) { auto padding_size = conditional_return(get<i>(stride_srt) > Int<0>{}, get<i>(upper_padding_whd) - get<i>(upper_corner_whd), get<i>(lower_corner_whd) - get<i>(lower_padding_whd)); return Int<1>{} + padding_size / get<i>(stride_srt); }); auto gemm_k = prepend(gemm_k_, shape<0>(tensor_cwhdn)); // For fprop/dgrad kernel, gemm_shapes is ((q, p, z, n), (c, s, r, t)) // For wgrad kernel, gemm_shapes is ((c, s, r, t), (q, p, z, n)) auto gemm_shapes_common = make_shape(gemm_mn, gemm_k); auto gemm_shapes = make_shape( basis_get(stride<0,1>(tma_layout_vt), gemm_shapes_common), basis_get(stride<0,0>(tma_layout_vt), gemm_shapes_common)); // For fprop/dgrad kernel, linearized shapes is (whdn, (c, s, r, t)) // For wgrad kernel linearized shapes is ((c, s, r, t), whdn) auto linear_shapes_common = make_shape(size(gemm_mn), gemm_k); auto linear_shapes = make_shape( basis_get(stride<0,1>(tma_layout_vt), linear_shapes_common), basis_get(stride<0,0>(tma_layout_vt), linear_shapes_common)); // // Calculate gmem basis stride based on tma layout tiling. 
// auto tma_basis_scale = make_shape(Int<1>{}, stride_whd, Int<1>{}, stride_srt); auto tma_basis = elem_scale(tma_basis_scale, make_basis_like(tma_basis_scale)); auto gbasis_strides_common = make_stride( append(get<1>(tma_basis), get<2>(tma_basis)), prepend(get<3>(tma_basis), get<0>(tma_basis))); // ((w,h,d,n),(c,s,r,t)) auto gbasis_strides = make_stride( basis_get(stride<0,1>(tma_layout_vt), gbasis_strides_common), basis_get(stride<0,0>(tma_layout_vt), gbasis_strides_common)); // // Create tma tensor // auto lower_corner = make_arithmetic_tuple(Int<0>{}, lower_corner_whd, Int<0>{}, lower_srt); auto tensor_multimode = make_tensor(ArithmeticTupleIterator(lower_corner), gemm_shapes, gbasis_strides); auto tensor_linear = make_identity_tensor(linear_shapes); auto tma_tensor = make_tensor(tensor_multimode.data(), composition( tensor_multimode.layout(), tensor_linear(Int<0>{}), tensor_linear.layout())); return cute::make_tuple(tma_desc, tma_tensor); } template <class CopyOp, class GEngine, class GLayout, class SLayout, class VShape, class VStride, class LowerCornerStride, class UpperCornerStride, class LowerPaddingStride, class UpperPaddingStride, class TraversalStride, class LowerSRTStride, class DilationStride> CUTE_HOST_RTC auto make_tma_atom_im2col(CopyOp, Tensor<GEngine,GLayout> const& gtensor, // Full GMEM Tensor: ((w, h, d, n), c) SLayout const& slayout, // CTA Tile of SMEM, potentially swizzled int32_t const& num_multicast, // The number of CTAs involved in multicasting Layout<VShape,VStride> const& cta_v_map, // V: CTA val idx -> gmem mode LowerCornerStride const& lower_corner_whd, UpperCornerStride const& upper_corner_whd, LowerPaddingStride const& lower_padding_whd, UpperPaddingStride const& upper_padding_whd, TraversalStride const& stride_whd, // traversal stride LowerSRTStride const& lower_srt, DilationStride const& stride_srt) // dilation { // // TMA parameter checking // CUTE_STATIC_ASSERT_V(product_each(shape(slayout)) == product_each(shape(cta_v_map)), "TMA requires CTA_Tile and SLayout top-level shape equivalence."); // // TMA slayout manipulation // // Invert the smem to get the largest contiguous vector in the smem layout auto inv_smem_layout = right_inverse(get_nonswizzle_portion(slayout)); // trunc_smem_idx -> trunc_smem_coord // Map from smem idx to a gmem mode auto sidx_to_gmode = coalesce(composition(cta_v_map, inv_smem_layout)); #if 0 print("g_layout : "); print(gtensor.layout()); print("\n"); print("s_layout : "); print(slayout); print("\n"); print("cta_t_map : "); print(cta_t_map); print("\n"); print("cta_v_map : "); print(cta_v_map); print("\n"); print("inv_smem : "); print(inv_smem_layout); print("\n"); print("sidx_to_gmode : "); print(sidx_to_gmode); print("\n"); #endif // // TMA gtensor manipulation // // Generate a TupleBasis for the gtensor auto glayout_basis = make_identity_layout(product_each(shape(gtensor))); // Tile the modes of gtensor with the truncated cta_v_map o inv_smem_layout_trunc auto tma_layout_full = flatten(composition(glayout_basis, sidx_to_gmode)); // Truncate any incompatibilities -- no starting in the middle of gmodes auto smem_rank = find_if(stride(tma_layout_full), [](auto e) { [[maybe_unused]] auto v = basis_value(e); return not is_constant<1,decltype(v)>{}; }); static_assert(smem_rank >= 2, "IM2COL expects at least 2 modes of the smem to vectorize with gmem."); // IM2COL uses a maximum of 2 modes constexpr int smem_tma_rank = cute::min(int(smem_rank), 2); // Keep only the static-1 basis modes into gmem auto tma_layout_trunc = 
take<0,smem_tma_rank>(tma_layout_full); // Split according to the portion each multicast CTA will be responsible for auto tma_layout_vt = logical_divide(tma_layout_trunc, shape_div(size(tma_layout_trunc), num_multicast)); #if 0 print("glayout_basis : "); print(glayout_basis); print("\n"); print("tma_layout_full : "); print(tma_layout_full); print("\n"); print("tma_layout_trunc: "); print(tma_layout_trunc); print("\n"); print("tma_layout_vt : "); print(tma_layout_vt); print("\n"); #endif auto range_c = size<0,0>(tma_layout_vt); auto range_whdn = size<0,1>(tma_layout_vt); Tensor gtensor_cwhdn = make_tensor(gtensor.data(), flatten(make_layout(basis_get(stride<0,0>(tma_layout_vt), gtensor.layout()), basis_get(stride<0,1>(tma_layout_vt), gtensor.layout())))); auto [tma_desc, tma_tensor] = make_im2col_tma_copy_desc( gtensor_cwhdn, range_c, range_whdn, detail::get_swizzle_portion(slayout), tma_layout_vt, lower_corner_whd, upper_corner_whd, lower_padding_whd, upper_padding_whd, stride_whd, lower_srt, stride_srt); // // Construct the Copy_Traits // using T = typename GEngine::value_type; constexpr int num_bits_per_tma = decltype(size(tma_layout_trunc))::value * sizeof(T) * 8; using Traits = Copy_Traits<CopyOp, cute::C<num_bits_per_tma>, decltype(tma_tensor)>; using Atom = Copy_Atom<Traits, typename GEngine::value_type>; #if 0 print("num_bits : "); print(num_bits_per_tma); print("\n"); #endif Traits tma_traits{tma_desc, tma_tensor}; // Return the Copy_Atom return Atom{tma_traits}; } /// Make a TiledCopy for im2col TMA load. /// /// @param copy_op The copy implementation: either /// SM90_TMA_LOAD_IM2COL or SM90_TMA_LOAD_IM2COL_MULTICAST. /// /// @param tensor_cwhdn The global tensor to use for im2col TMA loads. /// For Fprop convolutions, this is the activation tensor. This is /// the "original tensor that points to global memory, not the /// coordinate (im2col-transformed) tensor. /// /// @param slayout Layout of shared memory tile. /// /// @param stride_whd The traversal strides convolution /// parameter. /// /// @return TiledCopy specialization for im2col TMA loads. 
template <class CopyOp, class GEngine, class GLayout, class SLayout, class TShape, class TStride, class VShape, class VStride, class LowerCornerStride, class UpperCornerStride, class LowerPaddingStride, class UpperPaddingStride, class TraversalStride, class LowerSRTStride, class DilationStride> CUTE_HOST_RTC auto make_tma_copy_im2col(CopyOp const& copy_op, Tensor<GEngine,GLayout> const& gtensor, SLayout const& slayout, Layout<TShape,TStride> const& cta_t_map, // CTA tid -> logical TMA tid Layout<VShape,VStride> const& cta_v_map, // CTA vid -> gmem coord LowerCornerStride const& lower_corner_whd, UpperCornerStride const& upper_corner_whd, LowerPaddingStride const& lower_padding_whd, UpperPaddingStride const& upper_padding_whd, TraversalStride const& stride_whd, // traversal stride LowerSRTStride const& lower_srt, DilationStride const& stride_srt) // dilation { // // TMA parameter checking // CUTE_STATIC_ASSERT_V(size(slayout) % cosize(cta_t_map) == Int<0>{}, "Number of active CTAs in TMA must divide domain size of slayout."); Copy_Atom atom = make_tma_atom_im2col(copy_op, gtensor, slayout, cosize(cta_t_map), cta_v_map, lower_corner_whd, upper_corner_whd, lower_padding_whd, upper_padding_whd, stride_whd, lower_srt, stride_srt); // // Construct the TiledCopy // auto cta_tiler = product_each(shape(cta_v_map)); auto num_elems_per_tma = size<1>(typename decltype(atom)::RefLayout{}) / static_value<sizeof_bits<typename GEngine::value_type>>(); // smem idx -> smem coord auto inv_smem_layout = right_inverse(get_nonswizzle_portion(slayout)); // CTA V -> smem_coord auto layout_v = composition(inv_smem_layout, num_elems_per_tma); // Scale that up to cover all of the smem_coords auto layout_V = tile_to_shape(make_layout(layout_v), size(cta_v_map)); // CTA T -> smem idx auto layout_t = make_layout(cosize(cta_t_map), shape_div(num_elems_per_tma, cosize(cta_t_map))); // CTA TID -> smem coord auto layout_T = composition(inv_smem_layout, composition(layout_t, cta_t_map)); // Combine with the T mapping [[maybe_unused]] auto layout_TV = make_layout(layout_T, layout_V); #if 0 print("cta_tiler : "); print(cta_tiler); print("\n"); print("layout_v : "); print(layout_v); print("\n"); print("layout_V : "); print(layout_V); print("\n"); print("layout_t : "); print(layout_t); print("\n"); print("layout_T : "); print(layout_T); print("\n"); print("layout_TV : "); print(layout_TV); print("\n"); #endif return TiledCopy<decltype(atom), decltype(layout_TV), decltype(cta_tiler)>{atom}; } /// Make a TiledCopy for im2col TMA with no offsets. /// E.g. im2col TMA load for C and im2col TMA store for D. 
template <class CopyOp, class GEngine, class GLayout, class SLayout, class TShape, class TStride, class VShape, class VStride> CUTE_HOST_RTC auto make_tma_copy_im2col(CopyOp const& copy_op, Tensor<GEngine,GLayout> const& gtensor, SLayout const& slayout, Layout<TShape,TStride> const& cta_t_map, // CTA tid -> logical TMA tid Layout<VShape,VStride> const& cta_v_map) // CTA vid -> gmem coord { constexpr int num_spatial_modes = rank<0>(GLayout{}) - 1; return make_tma_copy_im2col(copy_op, gtensor, slayout, cta_t_map, cta_v_map, append<num_spatial_modes>(Stride<_0>{}, Int<0>{}), // lower_corner_whd append<num_spatial_modes>(Stride<_0>{}, Int<0>{}), // upper_corner_whd append<num_spatial_modes>(Stride<_0>{}, Int<0>{}), // lower_padding_whd append<num_spatial_modes>(Stride<_0>{}, Int<0>{}), // upper_padding_whd append<num_spatial_modes>(Stride<_1>{}, Int<1>{}), // stride_whd append<num_spatial_modes>(Stride<_0>{}, Int<0>{}), // lower_srt append<num_spatial_modes>(Stride<_1>{}, Int<1>{})); // stride_srt } } // namespace detail template <class CopyOp, class Engine0, class Layout0, class SLayout, class CTATiler, class MulticastSize, class LowerCornerStride, class UpperCornerStride, class LowerPaddingStride, class UpperPaddingStride, class TraversalStride, class LowerSRTStride, class DilationStride> CUTE_HOST_RTC auto make_im2col_tma_copy(CopyOp const& copy_op, Tensor<Engine0, Layout0> const& tensor_cwhdn, SLayout const& slayout, CTATiler const& cta_tiler, MulticastSize const& multicast_size, LowerCornerStride const& lower_corner_whd, UpperCornerStride const& upper_corner_whd, LowerPaddingStride const& lower_padding_whd, UpperPaddingStride const& upper_padding_whd, TraversalStride const& stride_whd, LowerSRTStride const& lower_srt, DilationStride const& stride_srt) { auto cta_v_tile = make_identity_layout(product_each(shape(tensor_cwhdn))).compose(cta_tiler); auto cta_t_tile = make_layout(multicast_size); return detail::make_tma_copy_im2col(copy_op, tensor_cwhdn, slayout, cta_t_tile, cta_v_tile, lower_corner_whd, upper_corner_whd, lower_padding_whd, upper_padding_whd, stride_whd, lower_srt, stride_srt); } // Explicit default for multicast_size template <class CopyOp, class Engine0, class Layout0, class SLayout, class CTATiler, class LowerCornerStride, class UpperCornerStride, class LowerPaddingStride, class UpperPaddingStride, class TraversalStride, class LowerSRTStride, class DilationStride> CUTE_HOST_RTC auto make_im2col_tma_copy(CopyOp const& copy_op, Tensor<Engine0, Layout0> const& tensor_cwhdn, SLayout const& slayout, CTATiler const& cta_tiler, LowerCornerStride const& lower_corner_whd, UpperCornerStride const& upper_corner_whd, LowerPaddingStride const& lower_padding_whd, UpperPaddingStride const& upper_padding_whd, TraversalStride const& stride_whd, LowerSRTStride const& lower_srt, DilationStride const& stride_srt) { return make_im2col_tma_copy(copy_op, tensor_cwhdn, slayout, cta_tiler, Int<1>{}, lower_corner_whd, upper_corner_whd, lower_padding_whd, upper_padding_whd, stride_whd, lower_srt, stride_srt); } // Explicit default for cta_tiler and multicast_size template <class CopyOp, class Engine0, class Layout0, class SLayout, class LowerCornerStride, class UpperCornerStride, class LowerPaddingStride, class UpperPaddingStride, class TraversalStride, class LowerSRTStride, class DilationStride> CUTE_HOST_RTC auto make_im2col_tma_copy(CopyOp const& copy_op, Tensor<Engine0, Layout0> const& tensor_cwhdn, SLayout const& slayout, LowerCornerStride const& lower_corner_whd, UpperCornerStride const& 
upper_corner_whd, LowerPaddingStride const& lower_padding_whd, UpperPaddingStride const& upper_padding_whd, TraversalStride const& stride_whd, LowerSRTStride const& lower_srt, DilationStride const& stride_srt) { return make_im2col_tma_copy(copy_op, tensor_cwhdn, slayout, product_each(shape(slayout)), Int<1>{}, lower_corner_whd, upper_corner_whd, lower_padding_whd, upper_padding_whd, stride_whd, lower_srt, stride_srt); } // No offsets copy. template <class CopyOp, class Engine0, class Layout0, class SLayout, class CTATiler, class MulticastSize> CUTE_HOST_RTC auto make_im2col_tma_copy(CopyOp const& copy_op, Tensor<Engine0, Layout0> const& tensor_cwhdn, SLayout const& slayout, CTATiler const& cta_tiler, MulticastSize const& multicast_size) { auto cta_v_tile = make_identity_layout(product_each(shape(tensor_cwhdn))).compose(cta_tiler); auto cta_t_tile = make_layout(multicast_size); return detail::make_tma_copy_im2col(copy_op, tensor_cwhdn, slayout, cta_t_tile, cta_v_tile); } // Explicit default for multicast_size template <class CopyOp, class Engine0, class Layout0, class SLayout, class CTATiler> CUTE_HOST_RTC auto make_im2col_tma_copy(CopyOp const& copy_op, Tensor<Engine0, Layout0> const& tensor_cwhdn, SLayout const& slayout, CTATiler const& cta_tiler) { return make_im2col_tma_copy(copy_op, tensor_cwhdn, slayout, cta_tiler, Int<1>{}); } // Explicit default for cta_tiler and multicast_size template <class CopyOp, class Engine0, class Layout0, class SLayout> CUTE_HOST_RTC auto make_im2col_tma_copy(CopyOp const& copy_op, Tensor<Engine0, Layout0> const& tensor_cwhdn, SLayout const& slayout) { return make_im2col_tma_copy(copy_op, tensor_cwhdn, slayout, product_each(shape(slayout)), Int<1>{}); } } // namespace cute
include/cute/atom/copy_traits_sm90_im2col.hpp/0
{ "file_path": "include/cute/atom/copy_traits_sm90_im2col.hpp", "repo_id": "include", "token_count": 16941 }
15
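The traits above follow CuTe's usual two-step TMA pattern: the host builds a non-executable atom through one of the make_im2col_tma_copy overloads, and the kernel turns it into the executable *_OP form with .with(mbarrier[, multicast_mask]) before handing it to cute::copy. The fragment below is only a sketch of that device-side step under stated assumptions: tma_load_a, gA, sA, smem_mbar and mcast_mask are placeholder names owned by the surrounding kernel, gA is assumed to be the coordinate tensor returned by get_tma_tensor() and already tiled with a trailing k-tile mode, and the commented partition shapes are assumptions rather than guarantees.

#include <cute/tensor.hpp>

// Sketch only (SM90): one pipeline stage of an im2col TMA load.
template <class TmaAtom, class GTensor, class STensor>
__device__ void load_a_tile(TmaAtom const& tma_load_a,  // built on the host with make_im2col_tma_copy
                            GTensor const& gA,          // im2col gmem tensor (from get_tma_tensor)
                            STensor      & sA,          // smem tile for the current stage
                            uint64_t     & smem_mbar,   // mbarrier living in shared memory
                            uint16_t       mcast_mask,  // 0x1 when no multicast
                            int            k_tile)
{
  auto cta_tma = tma_load_a.get_slice(0);   // slice for this CTA
  auto tAgA = cta_tma.partition_S(gA);      // (TMA,TMA_M,TMA_K,k)  -- assumed profile
  auto tAsA = cta_tma.partition_D(sA);      // (TMA,TMA_M,TMA_K)

  if (threadIdx.x == 0) {                   // a single thread issues the TMA
    // .with() converts the non-executable traits into the executable *_OP traits.
    cute::copy(tma_load_a.with(smem_mbar, mcast_mask),
               tAgA(cute::_, cute::_, cute::_, k_tile), tAsA);
  }
}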
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Portable bit field that supports byte and word straddling that can be used in unions to bit-wise define parameters. */ #pragma once #include <cute/config.hpp> #include <cute/numeric/numeric_types.hpp> // uint_bit_t namespace cute { class dummy_type {}; template <uint32_t BitStart, uint32_t NumBits, class OtherValueType = dummy_type> struct bit_field { static_assert(0 < NumBits && NumBits <= 64, "bit_fields with more than 64 bits are not supported."); // value_type: Use the smallest value type that fits NumBits static constexpr uint32_t value_type_bits = (NumBits <= 8) ? 8 : (NumBits <= 16) ? 16 : (NumBits <= 32) ? 32 : 64; using value_type = cute::uint_bit_t<value_type_bits>; // storage_type: Use the smallest storage_type that avoids boundary crossing static constexpr uint32_t storage_type_bits = (BitStart / 8 == (BitStart + NumBits - 1) / 8) ? 8 : (BitStart / 16 == (BitStart + NumBits - 1) / 16) ? 16 : (BitStart / 32 == (BitStart + NumBits - 1) / 32) ? 32 : 64; using storage_type = cute::uint_bit_t<storage_type_bits>; static_assert(sizeof(OtherValueType) == sizeof(value_type) || is_same<OtherValueType,dummy_type>::value, "sizeof(OtherValueType) must be same as sizeof(value_type)."); // Number of storage values needed: ceil_div(BitStart + NumBits, storage_type_bits) static constexpr uint32_t N = (BitStart + NumBits + storage_type_bits - 1) / storage_type_bits; // Index of storage value for BitStart static constexpr uint32_t idx = BitStart / storage_type_bits; // Bit of data_[idx] for BitStart static constexpr uint32_t bit_lo = BitStart % storage_type_bits; // Number of bits in data_[idx] used for NumBits if straddling, else 0 static constexpr uint32_t bit_hi = (idx + 1 < N) ? 
(storage_type_bits - bit_lo) : 0; public: // NumBits mask static constexpr value_type mask = value_type(uint64_t(-1) >> (64u - NumBits)); // NumBits mask for BitStart static constexpr storage_type mask_lo = storage_type(mask) << bit_lo; // NumBits mask for leftover bits in data_[idx+1] if straddling, else 0 static constexpr storage_type mask_hi = (idx + 1 < N) ? (storage_type(mask) >> bit_hi) : 0; storage_type data_[N]; // Get value CUTE_HOST_DEVICE constexpr value_type get() const { storage_type result = (data_[idx] & mask_lo) >> bit_lo; if constexpr (bit_hi != 0) { result |= (data_[idx+1] & mask_hi) << bit_hi; } return static_cast<value_type>(result); } // Set value CUTE_HOST_DEVICE constexpr void set(value_type x) { storage_type item = static_cast<storage_type>(x & mask); data_[idx] = static_cast<storage_type>((data_[idx] & ~mask_lo) | (item << bit_lo)); if constexpr (bit_hi != 0) { data_[idx+1] = static_cast<storage_type>((data_[idx+1] & ~mask_hi) | (item >> bit_hi)); } } // Assign value CUTE_HOST_DEVICE constexpr bit_field& operator=(value_type x) { set(x); return *this; } // Cast to value CUTE_HOST_DEVICE constexpr operator value_type () const { return get(); } // Assign OtherValueType CUTE_HOST_DEVICE constexpr bit_field& operator=(OtherValueType x) { return *this = *reinterpret_cast<value_type*>(&x); } // Cast to OtherValueType CUTE_HOST_DEVICE constexpr operator OtherValueType () const { value_type x = get(); return *reinterpret_cast<OtherValueType*>(&x); } }; } // end namespace cute
include/cute/container/bit_field.hpp/0
{ "file_path": "include/cute/container/bit_field.hpp", "repo_id": "include", "token_count": 1951 }
16
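A small host-side sketch of how cute::bit_field above is meant to be used: several fields laid over the same storage inside a union (the usage the class comment names), with the straddling case handled by the mask_lo/mask_hi pair. The field positions and values are arbitrary examples, not taken from any real descriptor, and the overlay assumes the usual little-endian layout of CUDA platforms.

// Illustrative only: two fields packed into 16 bits; 'hi' straddles the first byte.
#include <cute/container/bit_field.hpp>
#include <cstdint>
#include <cstdio>

int main() {
  union Packed {
    cute::bit_field<0, 6>   lo;   // bits [0,6)  -> 8-bit storage
    cute::bit_field<6, 10>  hi;   // bits [6,16) -> 16-bit storage, straddles a byte boundary
  } p{};                          // zero-initializes the underlying storage

  p.lo = 37;                      // operator=(value_type)
  p.hi = 513;                     // fits in 10 bits
  std::printf("lo=%u hi=%u\n", uint32_t(p.lo), uint32_t(p.hi));  // expected: lo=37 hi=513
  return 0;
}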
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/util/type_traits.hpp> #include <cute/numeric/numeric_types.hpp> // sizeof_bits #include <cute/numeric/math.hpp> #include <cute/numeric/integral_constant.hpp> #include <cute/container/array_subbyte.hpp> #include <cute/pointer_base.hpp> #include <cute/pointer_swizzle.hpp> namespace cute { // // recast_ptr<T> -- Create an iterator over values of type T. // For most types this will simply be T*, but certain types require more care. // Subbyte Types: uint2_t, uint4_t, etc // Requires construction of a subbyte_iterator<T> in order to properly // resolve each element in byte-addressed memory. 
// template <class NewT> CUTE_HOST_DEVICE constexpr auto recast_ptr(void* ptr) { if constexpr (cute::is_subbyte_v<NewT>) { return subbyte_iterator<NewT>(ptr); } else { return reinterpret_cast<NewT*>(ptr); } CUTE_GCC_UNREACHABLE; } template <class NewT> CUTE_HOST_DEVICE constexpr auto recast_ptr(void const* ptr) { if constexpr (cute::is_subbyte_v<NewT>) { return subbyte_iterator<NewT const>(ptr); } else { return reinterpret_cast<NewT const*>(ptr); } CUTE_GCC_UNREACHABLE; } // Disambiguate nullptr template <class NewT> CUTE_HOST_DEVICE constexpr auto recast_ptr(decltype(nullptr)) { // nullptr_t return recast_ptr<NewT>(static_cast<NewT*>(nullptr)); } // // gmem_ptr // template <class P> struct gmem_ptr : iter_adaptor<P, gmem_ptr<P>> { using iter_adaptor<P, gmem_ptr<P>>::iter_adaptor; }; template <class T, class = void> struct is_gmem : false_type {}; template <class P> // Found the gmem struct is_gmem<gmem_ptr<P>> : true_type {}; template <class P> // Recurse on ::iterator, if possible struct is_gmem<P, void_t<typename P::iterator>> : is_gmem<typename P::iterator> {}; template <class P> constexpr bool is_gmem_v = is_gmem<P>::value; // Idempotent gmem tag on an iterator template <class Iterator> CUTE_HOST_DEVICE constexpr auto make_gmem_ptr(Iterator iter) { if constexpr (is_gmem<Iterator>::value) { return iter; } else { return gmem_ptr<Iterator>{iter}; } CUTE_GCC_UNREACHABLE; } // Explicitly typed construction from a raw pointer template <class T> CUTE_HOST_DEVICE constexpr auto make_gmem_ptr(void* ptr) { return make_gmem_ptr(recast_ptr<T>(ptr)); } // Explicitly typed construction from a raw pointer template <class T> CUTE_HOST_DEVICE constexpr auto make_gmem_ptr(void const* ptr) { return make_gmem_ptr(recast_ptr<T const>(ptr)); } // nullptr_t overload for make_gmem_ptr<float>(nullptr) disambiguation template <class T> CUTE_HOST_DEVICE constexpr auto make_gmem_ptr(decltype(nullptr)) { // nullptr_t return make_gmem_ptr(recast_ptr<T>(nullptr)); } // The gmem tag is invariant over type-recast template <class NewT, class P> CUTE_HOST_DEVICE constexpr auto recast_ptr(gmem_ptr<P> const& ptr) { return make_gmem_ptr(recast_ptr<NewT>(ptr.get())); } // // smem_ptr // template <class P> struct smem_ptr : iter_adaptor<P, smem_ptr<P>> { using iter_adaptor<P, smem_ptr<P>>::iter_adaptor; }; template <class T, class = void> struct is_smem : false_type {}; template <class P> // Found the smem struct is_smem<smem_ptr<P>> : true_type {}; template <class P> // Recurse on ::iterator, if possible struct is_smem<P, void_t<typename P::iterator>> : is_smem<typename P::iterator> {}; template <class P> constexpr bool is_smem_v = is_smem<P>::value; // Idempotent smem tag on an iterator template <class Iterator> CUTE_HOST_DEVICE constexpr auto make_smem_ptr(Iterator iter) { if constexpr (is_smem<Iterator>::value) { return iter; } else { return smem_ptr<Iterator>{iter}; } CUTE_GCC_UNREACHABLE; } // Make a smem swizzle pointer, common operation template <class Iterator, class Swizzle> CUTE_HOST_DEVICE constexpr auto make_smem_ptr(Iterator ptr, Swizzle sw) { return make_swizzle_ptr(make_smem_ptr(ptr), sw); } // Explicitly typed construction from a raw pointer template <class T> CUTE_HOST_DEVICE constexpr auto make_smem_ptr(void* ptr) { return make_smem_ptr(recast_ptr<T>(ptr)); } // Explicitly typed construction from a raw pointer template <class T> CUTE_HOST_DEVICE constexpr auto make_smem_ptr(void const* ptr) { return make_smem_ptr(recast_ptr<T const>(ptr)); } // The smem tag is invariant over type-recast template <class 
NewT, class P> CUTE_HOST_DEVICE constexpr auto recast_ptr(smem_ptr<P> const& ptr) { return make_smem_ptr(recast_ptr<NewT>(ptr.get())); } // // rmem_ptr // template <class P> struct rmem_ptr : iter_adaptor<P, rmem_ptr<P>> { using iter_adaptor<P, rmem_ptr<P>>::iter_adaptor; }; // Anything that is not gmem or smem is rmem template <class T, class = void> struct is_rmem : bool_constant<not (is_gmem<T>::value || is_smem<T>::value)> {}; template <class P> struct is_rmem<rmem_ptr<P>> : true_type {}; template <class P> constexpr bool is_rmem_v = is_rmem<P>::value; // Idempotent rmem tag on an iterator template <class Iterator> CUTE_HOST_DEVICE constexpr auto make_rmem_ptr(Iterator iter) { if constexpr (is_rmem<Iterator>::value) { return iter; } else { return rmem_ptr<Iterator>{iter}; } CUTE_GCC_UNREACHABLE; } // Explicitly typed construction from a raw pointer template <class T> CUTE_HOST_DEVICE constexpr auto make_rmem_ptr(void* ptr) { return make_rmem_ptr(recast_ptr<T>(ptr)); } // Explicitly typed construction from a raw pointer template <class T> CUTE_HOST_DEVICE constexpr auto make_rmem_ptr(void const* ptr) { return make_rmem_ptr(recast_ptr<T const>(ptr)); } // The rmem tag is invariant over type-recast template <class NewT, class P> CUTE_HOST_DEVICE constexpr auto recast_ptr(rmem_ptr<P> const& ptr) { return make_rmem_ptr(recast_ptr<NewT>(ptr.get())); } // // Display utilities // template <class T> CUTE_HOST_DEVICE void print(gmem_ptr<T> ptr) { printf("gmem_"); print(ptr.get()); } template <class T> CUTE_HOST_DEVICE void print(smem_ptr<T> ptr) { printf("smem_"); print(ptr.get()); } template <class T> CUTE_HOST_DEVICE void print(rmem_ptr<T> ptr) { printf("rmem_"); print(ptr.get()); } #if !defined(__CUDACC_RTC__) template <class T> CUTE_HOST std::ostream& operator<<(std::ostream& os, gmem_ptr<T> ptr) { return os << "gmem_[" << int(sizeof_bits<iter_value_t<T>>::value) << "b]"; } template <class T> CUTE_HOST std::ostream& operator<<(std::ostream& os, smem_ptr<T> ptr) { return os << "smem_[" << int(sizeof_bits<iter_value_t<T>>::value) << "b]"; } template <class T> CUTE_HOST std::ostream& operator<<(std::ostream& os, rmem_ptr<T> ptr) { return os << "rmem_[" << int(sizeof_bits<iter_value_t<T>>::value) << "b]"; } #endif // !defined(__CUDACC_RTC__) } // end namespace cute
include/cute/pointer.hpp/0
{ "file_path": "include/cute/pointer.hpp", "repo_id": "include", "token_count": 3172 }
17
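Because the gmem/smem/rmem tags above only decorate an iterator type, they can be exercised entirely on the host. The sketch below is a minimal example under stated assumptions: it assumes the CuTe headers are on the include path and that cute::uint4_t is the sub-byte type spelled as in the comment above; it shows the tag being carried by the type and recast_ptr switching element types.

// Host-side sketch: memory-space tags and recast_ptr.
#include <cute/tensor.hpp>
#include <cstdint>

int main() {
  float buf[8] = {};

  auto g = cute::make_gmem_ptr(buf);                       // gmem-tagged float*
  static_assert(cute::is_gmem<decltype(g)>::value, "tag travels with the type");
  static_assert(!cute::is_smem<decltype(g)>::value, "not smem");

  auto gi = cute::recast_ptr<int32_t>(buf);                // plain int32_t* (same width)
  auto g4 = cute::recast_ptr<cute::uint4_t>(buf);          // subbyte_iterator<uint4_t> (type name assumed)

  // The tag survives composition into a Tensor.
  auto t = cute::make_tensor(g, cute::make_shape(2, 4));   // column-major 2x4 view over buf
  t(1, 3) = 1.0f;
  cute::print_tensor(t);

  (void)gi; (void)g4;
  return 0;
}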
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cuComplex.h> #include <cuda_fp16.h> #if defined(__CUDACC_RTC__) #include <cuda/std/cstdint> #else #include <cstdint> #endif #include "cutlass/cutlass.h" #include "cutlass/functional.h" #include "cutlass/real.h" #include "cutlass/numeric_types.h" #include "cutlass/fast_math.h" #if !defined(__CUDACC_RTC__) #include <iosfwd> #endif namespace cutlass { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Enumeraed type describing a transformation on a complex value. 
enum class ComplexTransform { kNone, kConjugate }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines ComplexTransform inversions template <ComplexTransform kTransform> struct InvertComplexTransform; /// Invert ComplexTransform from kNone to kConjugate template <> struct InvertComplexTransform<ComplexTransform::kNone> { static ComplexTransform const transform = ComplexTransform::kConjugate; }; /// Invert ComplexTransform from kConjugate to kNone template <> struct InvertComplexTransform<ComplexTransform::kConjugate> { static ComplexTransform const transform = ComplexTransform::kNone; }; ///////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// // // Accessors for CUDA complex types // #if !defined(__CUDACC_RTC__) /// Returns the real part of the complex number CUTLASS_HOST_DEVICE float const &real(cuFloatComplex const &z) { return z.x; } /// Returns the real part of the complex number CUTLASS_HOST_DEVICE float &real(cuFloatComplex &z) { return z.x; } /// Returns the real part of the complex number CUTLASS_HOST_DEVICE double const &real(cuDoubleComplex const &z) { return z.x; } /// Returns the real part of the complex number CUTLASS_HOST_DEVICE double &real(cuDoubleComplex &z) { return z.x; } /// Returns the imaginary part of the complex number CUTLASS_HOST_DEVICE float const &imag(cuFloatComplex const &z) { return z.y; } /// Returns the imaginary part of the complex number CUTLASS_HOST_DEVICE float &imag(cuFloatComplex &z) { return z.y; } /// Returns the imaginary part of the complex number CUTLASS_HOST_DEVICE double const &imag(cuDoubleComplex const &z) { return z.y; } /// Returns the imaginary part of the complex number CUTLASS_HOST_DEVICE double &imag(cuDoubleComplex &z) { return z.y; } #endif /////////////////////////////////////////////////////////////////////////////////////////////////// /// Class for representing and manipulating complex numbers with conversions from built-in CUDA /// complex types. template <typename T> class complex { public: /// Type alias for scalar type using value_type = T; private: // // Data members // /// Real part T _real; /// Imaginary part T _imag; public: // // Methods // /// Default constructor complex() = default; /// Constructor CUTLASS_HOST_DEVICE complex(T r) : _real(r), _imag(T(0)) {} /// Constructor CUTLASS_HOST_DEVICE complex(T r, T i) : _real(r), _imag(i) {} /// Constructor template<typename A> CUTLASS_HOST_DEVICE complex(complex<A> const &z) : _real(static_cast<T>(z.real())), _imag(static_cast<T>(z.imag())) {} #if !defined(__CUDACC_RTC__) /// Conversion from cuFloatComplex CUTLASS_HOST_DEVICE complex(cuFloatComplex const &z) : _real(static_cast<T>(cuCrealf(z))), _imag(static_cast<T>(cuCimagf(z))) {} /// Conversion from cuDoubleComplex CUTLASS_HOST_DEVICE complex(cuDoubleComplex const &z) : _real(static_cast<T>(cuCreal(z))), _imag(static_cast<T>(cuCimag(z))) {} #endif /// Equality operator CUTLASS_HOST_DEVICE bool operator==(complex<T> const &rhs) const { return this->real() == rhs.real() && this->imag() == rhs.imag(); } /// Inequality operator CUTLASS_HOST_DEVICE bool operator!=(complex<T> const &rhs) const { return !(*this == rhs); } /// Addition template <typename A> CUTLASS_HOST_DEVICE complex<T> operator+(complex<A> const &rhs) const { return complex<T>(this->real() + rhs.real(), this->imag() + rhs.imag()); } /// Reduction into memory address. 
Components may update out of order. template <typename OtherT> CUTLASS_DEVICE void red(complex<OtherT> *ptr) const { static_assert(platform::is_same<T, OtherT>::value, "Component type must match"); cutlass::atomic_add<T> reduce; reduce(&ptr->_real, _real); reduce(&ptr->_imag, _imag); } /// Reduction into memory address. Components may update out of order. (Half specialization) CUTLASS_DEVICE void red(complex<half_t> *ptr) const { static_assert(platform::is_same<T, half_t>::value, "Component type must match"); half2 *h2_ptr = reinterpret_cast<half2*>(ptr); half2 h2_data = reinterpret_cast<half2&>(*this); cutlass::atomic_add<half2> reduce; reduce(h2_ptr, h2_data); } /// Subtraction template <typename A> CUTLASS_HOST_DEVICE complex<T> operator-(complex<A> const &rhs) const { return complex<T>(this->real() - rhs.real(), this->imag() - rhs.imag()); } /// Multiplication template <typename A> CUTLASS_HOST_DEVICE complex<T> operator*(complex<A> const &rhs) const { return complex<T>(this->real() * rhs.real() - this->imag() * rhs.imag(), this->real() * rhs.imag() + this->imag() * rhs.real()); } /// Scalar Multiplication template <typename A> CUTLASS_HOST_DEVICE complex<T> operator*(A const &s) const { return complex<T>(this->real() * s, this->imag() * s); } /// Division template <typename A> CUTLASS_HOST_DEVICE complex<T> operator/(complex<A> const &rhs) const { T d = T(rhs.real() * rhs.real() + rhs.imag() * rhs.imag()); return complex<T>( (real() * rhs.real() + imag() * rhs.imag()) / d, (imag() * rhs.real() - real() * rhs.imag()) / d ); } /// Scalar Division template <typename A> CUTLASS_HOST_DEVICE complex<T> operator/(A const &s) const { return complex<T>(this->real() / s, this->imag() / s); } /// Addition template <typename A> CUTLASS_HOST_DEVICE complex<T> &operator+=(complex<A> const &rhs) { *this = *this + rhs; return *this; } /// Subtraction template <typename A> CUTLASS_HOST_DEVICE complex<T> &operator-=(complex<A> const &rhs) { *this = *this - rhs; return *this; } /// Multiplication template <typename A> CUTLASS_HOST_DEVICE complex<T> &operator*=(complex<A> const &rhs) { *this = *this * rhs; return *this; } /// Scalar multiplication template <typename A> CUTLASS_HOST_DEVICE complex<T> &operator*=(A s) { *this = *this * s; return *this; } /// Division template <typename A> CUTLASS_HOST_DEVICE complex<T> &operator/=(complex<A> const &rhs) { *this = *this / rhs; return *this; } /// Accesses the real part of the complex number CUTLASS_HOST_DEVICE T const &real() const { return _real; } /// Accesses the real part of the complex number CUTLASS_HOST_DEVICE T &real() { return _real; } /// Accesses the imaginary part of the complex number CUTLASS_HOST_DEVICE T const &imag() const { return _imag; } /// Accesses the imaginary part of the complex number CUTLASS_HOST_DEVICE T &imag() { return _imag; } /// Set the real part of the complex number CUTLASS_HOST_DEVICE void real(T real) { _real = real; } /// Set the imaginary part of the complex number CUTLASS_HOST_DEVICE void imag(T imag) { _imag = imag; } #if !defined(__CUDACC_RTC__) /// Converts to cuFloatComplex CUTLASS_HOST_DEVICE explicit operator cuFloatComplex() const { return make_cuFloatComplex(float(real()), float(imag())); } /// Converts to cuDoubleComplex CUTLASS_HOST_DEVICE explicit operator cuDoubleComplex() const { return make_cuDoubleComplex(real(), imag()); } #endif }; /////////////////////////////////////////////////////////////////////////////////////////////////// // // Accessors for complex template // /// Returns the real part of the 
complex number template <typename T> CUTLASS_HOST_DEVICE T const &real(complex<T> const &z) { return z.real(); } /// Returns the real part of the complex number template <typename T> CUTLASS_HOST_DEVICE T &real(complex<T> &z) { return z.real(); } /// Returns the imaginary part of the complex number template <typename T> CUTLASS_HOST_DEVICE T const &imag(complex<T> const &z) { return z.imag(); } /// Returns the imaginary part of the complex number template <typename T> CUTLASS_HOST_DEVICE T &imag(complex<T> &z) { return z.imag(); } /// Returns the real part of the real number template <typename T> CUTLASS_HOST_DEVICE T const &real(T const &r) { return r; } /// Returns the real part of the real number template <typename T> CUTLASS_HOST_DEVICE T &real(T &r) { return r; } /// Returns the imaginary part of the real number template <typename T> CUTLASS_HOST_DEVICE T const &imag(T const &r) { return T(); } /// Returns the imaginary part of the complex number template <typename T> CUTLASS_HOST_DEVICE T &imag(T &r) { return T(); } // // Output operators // #if !defined(__CUDACC_RTC__) template <typename T> std::ostream &operator<<(std::ostream &out, complex<T> const &z) { T _r = real(z); T _i = imag(z); if (bool(_i)) { return out << _r << "+i" << _i; } return out << _r; } #endif // // Non-member operators defined for complex types // // // Non-member functions defined for complex numbers // /// Returns the magnitude of the complex number template <typename T> CUTLASS_HOST_DEVICE T abs(complex<T> const &z) { return sqrt(norm(z)); } /// Returns the magnitude of the complex number template <typename T> CUTLASS_HOST_DEVICE T arg(complex<T> const &z) { return atan2(imag(z), real(z)); } /// Returns the squared magnitude of a real number template <typename T> CUTLASS_HOST_DEVICE T norm(T const &z) { return z * z; } /// Returns the squared magnitude of a real number template <> CUTLASS_HOST_DEVICE int8_t norm(int8_t const &z) { return static_cast<int8_t>(z * z); } /// Returns the squared magnitude of a complex number template <typename T> CUTLASS_HOST_DEVICE double norm(complex<T> const &z) { return real(z) * real(z) + imag(z) * imag(z); } /// Norm-accumulate calculation template <typename T, typename R> CUTLASS_HOST_DEVICE R norm_accumulate(T const &x, R const & accumulator) { return accumulator + static_cast<R>(x) * static_cast<R>(x); } /// Norm accumulate specialized for complex types template <typename T, typename R> CUTLASS_HOST_DEVICE R norm_accumulate(complex<T> const &z, R const &accumulator) { return accumulator + static_cast<R>(real(z)) * static_cast<R>(real(z)) + static_cast<R>(imag(z)) * static_cast<R>(imag(z)); } CUTLASS_HOST_DEVICE float conj(float const &z) { return z; } CUTLASS_HOST_DEVICE double conj(double const &z) { return z; } CUTLASS_HOST_DEVICE half_t conj(half_t const& z) { return z; } CUTLASS_HOST_DEVICE int32_t conj(int32_t const& z) { return z; } CUTLASS_HOST_DEVICE uint32_t conj(uint32_t const& z) { return z; } CUTLASS_HOST_DEVICE int64_t conj(int64_t const& z) { return z; } CUTLASS_HOST_DEVICE uint64_t conj(uint64_t const& z) { return z; } CUTLASS_HOST_DEVICE int4b_t conj(int4b_t const& z) { return z; } CUTLASS_HOST_DEVICE uint4b_t conj(uint4b_t const& z) { return z; } CUTLASS_HOST_DEVICE bfloat16_t conj(bfloat16_t const& z) { return z; } CUTLASS_HOST_DEVICE uint1b_t conj(uint1b_t const& z) { return z; } CUTLASS_HOST_DEVICE tfloat32_t conj(tfloat32_t const& z) { return z; } CUTLASS_HOST_DEVICE float_e4m3_t conj(float_e4m3_t const& z) { return z; } CUTLASS_HOST_DEVICE 
float_e5m2_t conj(float_e5m2_t const& z) { return z; } /// Returns the complex conjugate template <typename T> CUTLASS_HOST_DEVICE complex<T> conj(complex<T> const &z) { return complex<T>(real(z), -imag(z)); } /// Projects the complex number z onto the Riemann sphere template <typename T> CUTLASS_HOST_DEVICE complex<T> proj(complex<T> const &z) { T d = real(z) * real(z) + imag(z) * imag(z) + T(1); return complex<T>((T(2) * real(z)) / d, (T(2) * imag(z)) / d); } /// Returns a complex number with magnitude r and phase theta template <typename T> CUTLASS_HOST_DEVICE complex<T> polar(T const &r, T const &theta = T()) { return complex<T>(r * cos(theta), r * sin(theta)); } /// Computes the complex exponential of z. template <typename T> CUTLASS_HOST_DEVICE complex<T> exp(complex<T> const &z) { return complex<T>(fast_exp(real(z)) * fast_cos(imag(z)), fast_exp(real(z)) * fast_sin(imag(z))); } /// Computes the log of z template <typename T> CUTLASS_HOST_DEVICE complex<T> log(complex<T> const &z) { return complex<T>(log(abs(z)), arg(z)); } /// Computes the log base 10 of z template <typename T> CUTLASS_HOST_DEVICE complex<T> log10(complex<T> const &z) { return log(z) / T(log(T(10))); } /// Computes the square root of complex number z template <typename T> CUTLASS_HOST_DEVICE complex<T> sqrt(complex<T> const &z) { return sqrt(T(2)) / T(2) * complex<T>(sqrt(sqrt(norm(z)) + real(z)), (imag(z) < 0 ? T(-1) : T(1)) * sqrt(sqrt(norm(z)) - real(z))); } /// Computes the cosine of complex z. template <typename T> CUTLASS_HOST_DEVICE complex<T> cos(complex<T> const &z) { return (exp(z) + exp(-z)) / T(2); } /// Computes the sin of complex z. template <typename T> CUTLASS_HOST_DEVICE complex<T> sin(complex<T> const &z) { return (exp(-z) - exp(z)) * complex<T>(T(0), T(1) / T(2)); } /// Comparison template <typename T> CUTLASS_HOST_DEVICE bool operator<(complex<T> const &lhs, complex<T> const &rhs) { return true; } ////////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for complex-valued type. 
template <typename T> struct RealType< complex<T> > { using Type = T; /// Number of elements static int const kExtent = 2; CUTLASS_HOST_DEVICE static complex<T> from_real(double x) { return complex<T>(static_cast<T>(x)); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <> CUTLASS_HOST_DEVICE cutlass::complex<half_t> from_real<cutlass::complex<half_t> >(double r) { return cutlass::complex<half_t>(half_t(r)); } template <> CUTLASS_HOST_DEVICE cutlass::complex<float> from_real<cutlass::complex<float> >(double r) { return cutlass::complex<float>(float(r)); } template <> CUTLASS_HOST_DEVICE cutlass::complex<double> from_real<cutlass::complex<double> >(double r) { return cutlass::complex<double>(r); } ////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct is_complex { static bool const value = false; }; template <typename T> struct is_complex<complex<T>> { static bool const value = true; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // functional.h numeric specializations ///////////////////////////////////////////////////////////////////////////////////////////////// /// Squares with optional conversion template <typename T, typename Output> struct magnitude_squared<complex<T>, Output> { CUTLASS_HOST_DEVICE Output operator()(complex<T> lhs) const { multiplies<Output> mul_op; Output y_r = Output(lhs.real()); Output y_i = Output(lhs.imag()); return mul_op(y_r, y_r) + mul_op(y_i, y_i); } }; /// Fused multiply-add template <typename T> struct multiply_add<complex<T>, complex<T>, complex<T>> { CUTLASS_HOST_DEVICE complex<T> operator()( complex<T> const &a, complex<T> const &b, complex<T> const &c) const { T real = c.real(); T imag = c.imag(); real += a.real() * b.real(); real += -a.imag() * b.imag(); imag += a.real() * b.imag(); imag += a.imag () * b.real(); return complex<T>{ real, imag }; } }; /// Fused multiply-add template <typename T> struct multiply_add<complex<T>, T, complex<T>> { CUTLASS_HOST_DEVICE complex<T> operator()( complex<T> const &a, T const &b, complex<T> const &c) const { T real = c.real(); T imag = c.imag(); real += a.real() * b; imag += a.imag () * b; return complex<T>{ real, imag }; } }; /// Fused multiply-add template <typename T> struct multiply_add<T, complex<T>, complex<T>> { CUTLASS_HOST_DEVICE complex<T> operator()( T const &a, complex<T> const &b, complex<T> const &c) const { T real = c.real(); T imag = c.imag(); real += a * b.real(); imag += a * b.imag(); return complex<T>{ real, imag }; } }; /// Conjugate template <typename T> struct conjugate<complex<T>> { CUTLASS_HOST_DEVICE complex<T> operator()(complex<T> const &a) const { return conj(a); } }; /// Computes the square of a difference with optional conversion template <typename T, typename Output> struct magnitude_squared_difference<complex<T>, Output> { CUTLASS_HOST_DEVICE Output operator()(complex<T> lhs, complex<T> rhs) const { multiplies<Output> mul_op; Output y_r = Output(lhs.real()) - Output(rhs.real()); Output y_i = Output(lhs.imag()) - Output(rhs.imag()); return mul_op(y_r, y_r) + mul_op(y_i, y_i); } }; /// Reduces value into the data pointed to by ptr (complex<T> specialization) template <typename T> struct atomic_add<complex<T>> { CUTLASS_DEVICE void operator()(complex<T> *ptr, const complex<T> &data) { data.red(ptr); } }; ////////////////////////////////////////////////////////////////////////////////////////////////// 
} // namespace cutlass //////////////////////////////////////////////////////////////////////////////////////////////////
include/cutlass/complex.h/0
{ "file_path": "include/cutlass/complex.h", "repo_id": "include", "token_count": 7008 }
18
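A short host-side example of the complex<T> arithmetic and the functional.h specializations defined above; the numbers are arbitrary, and compiling it only requires that the CUDA toolkit headers pulled in by complex.h (cuComplex.h, cuda_fp16.h) be visible.

#include "cutlass/complex.h"
#include <iostream>

int main() {
  cutlass::complex<float> a(1.0f, 2.0f);
  cutlass::complex<float> b(3.0f, -1.0f);

  auto sum  = a + b;                         // (4, 1)
  auto prod = a * b;                         // (5, 5)
  auto cj   = cutlass::conj(a);              // (1, -2)

  cutlass::multiply_add<cutlass::complex<float>,
                        cutlass::complex<float>,
                        cutlass::complex<float>> fma;
  auto d = fma(a, b, sum);                   // a*b + sum = (9, 6)

  std::cout << sum << " " << prod << " " << cj << " " << d << "\n";
  std::cout << "norm(a) = " << cutlass::norm(a) << "\n";   // 5
  return 0;
}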
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/conv/convolution.h" #include "cutlass/arch/arch.h" #include "cute/layout.hpp" #include "cute/numeric/integral_constant.hpp" ////////////////////////////////////////////////////////////////////////////// namespace cutlass::conv { ////////////////////////////////////////////////////////////////////////////// // // Policies for categorical dispatch of mainloop against kernel grid schedules // struct KernelImplicitTmaWarpSpecializedSm90 { }; struct KernelImplicitTmaWarpSpecializedSm90Cooperative { }; struct KernelImplicitTmaWarpSpecializedSm90Pingpong { }; // // Collective Mainloop Policies // // n-buffer in smem (Hopper TMA), pipelined with Hopper GMMA and TMA, static schedule between TMA and GMMA // for fprop template< conv::Operator ConvOp_, int Stages_, int NumSpatialDimensions_, class ClusterShape_ = cute::Shape<cute::C<1>,cute::C<1>,cute::C<1>>, class KernelSchedule = KernelImplicitTmaWarpSpecializedSm90, int PipelineAsyncMmaStages_ = 1 > struct MainloopSm90TmaGmmaWarpSpecializedImplicitGemm { static constexpr int Stages = Stages_; static constexpr int NumSpatialDimensions = NumSpatialDimensions_; static constexpr Operator ConvOp = ConvOp_; static constexpr int PipelineAsyncMmaStages = PipelineAsyncMmaStages_; using ClusterShape = ClusterShape_; using ArchTag = arch::Sm90; using Schedule = KernelSchedule; static_assert(NumSpatialDimensions >= 1); static_assert(! (cute::is_same_v<KernelSchedule,KernelImplicitTmaWarpSpecializedSm90Cooperative> || cute::is_same_v<KernelSchedule,KernelImplicitTmaWarpSpecializedSm90Pingpong>), "Persistent schedules not support for conv yet."); }; ////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::conv
include/cutlass/conv/dispatch_policy.hpp/0
{ "file_path": "include/cutlass/conv/dispatch_policy.hpp", "repo_id": "include", "token_count": 1012 }
19
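Since the dispatch policy above is a pure compile-time tag, a usage sketch is just an instantiation plus a few static checks; the stage count and cluster shape below are arbitrary example values rather than tuned recommendations.

// Compile-time sketch: instantiating the conv mainloop dispatch policy.
#include "cutlass/conv/dispatch_policy.hpp"

using Policy = cutlass::conv::MainloopSm90TmaGmmaWarpSpecializedImplicitGemm<
    cutlass::conv::Operator::kFprop,                 // ConvOp
    /*Stages=*/4,
    /*NumSpatialDimensions=*/2,                      // 2-D convolution
    cute::Shape<cute::_1, cute::_2, cute::_1>>;      // 1x2x1 cluster; Schedule and async stages use defaults

static_assert(Policy::Stages == 4);
static_assert(Policy::NumSpatialDimensions == 2);
static_assert(cute::is_same_v<Policy::ArchTag, cutlass::arch::Sm90>);
static_assert(cute::is_same_v<Policy::Schedule,
                              cutlass::conv::KernelImplicitTmaWarpSpecializedSm90>);

int main() { return 0; }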
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile) matrix from memory. This iterator assumes TensorNHWC layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/

#pragma once

#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/functional.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/threadblock/conv2d_params.h"

/////////////////////////////////////////////////////////////////////////////////////////////////

namespace cutlass {
namespace conv {
namespace threadblock {

/////////////////////////////////////////////////////////////////////////////////////////////////

template <
  typename Shape_,
  typename Element_,
  typename ThreadMap_,
  conv::StrideSupport StrideSupport_ = conv::StrideSupport::kStrided,
  typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess>
>
class Conv2dDgradOutputGradientTileAccessIteratorAnalytic;

/////////////////////////////////////////////////////////////////////////////////////////////////

// Conv2dDgradOutputGradientTileAccessIteratorAnalytic strided dgrad needs special handling using
// unscaled coordinates
template <
  typename Shape_,
  typename Element_,
  typename ThreadMap_,
  typename AccessType_
>
class Conv2dDgradOutputGradientTileAccessIteratorAnalytic <
  Shape_,
  Element_,
  ThreadMap_,
  conv::StrideSupport::kStrided,
  AccessType_
> {
public:

  //
  // Types
  //

  using Shape = Shape_;
  using Element = Element_;
  using Layout = layout::TensorNHWC;
  using ThreadMap = ThreadMap_;
  using AccessType = AccessType_;
  using TensorRef = cutlass::TensorRef<Element, Layout>;
  using TensorCoord = typename Layout::TensorCoord;
  using Index = typename Layout::Index;
  using LongIndex = typename Layout::LongIndex;
  static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
  static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided;
  static int const kConvDim = 2;
  using ConvProblemSize = typename conv::Conv2dProblemSize;
  static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;

  static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
    "Vectors implied by the thread map must be divisible by the access type.");

  static_assert(sizeof_bits<Element>::value >= 8,
    "DGRAD requires elements of size 8b or greater.");

  //
  // Simplifying assertions
  //
  static_assert(ThreadMap::Iterations::kContiguous == 1,
    "Require Iterations::kContiguous == 1");

  //
  // Parameters structure
  //

  using Params = Conv2dDgradOutputGradientTileAccessIteratorAnalyticParams;

private:

  Params const &params_;
  Conv2dProblemSize const &problem_size_;
  LongIndex iteration_contiguous_;
  LongIndex iteration_strided_;
  LongIndex iteration_vector_;
  char const *pointer_;

  int filter_k_;
  int filter_r_;
  int filter_s_;
  int start_r_;
  int start_s_;

  int offset_n_[ThreadMap::Iterations::kStrided];
  int offset_p_[ThreadMap::Iterations::kStrided];
  int offset_q_[ThreadMap::Iterations::kStrided];

public:

  CUTLASS_HOST_DEVICE
  Conv2dDgradOutputGradientTileAccessIteratorAnalytic(
    Params const &params,
    Conv2dProblemSize const &problem_size,
    Element const *ptr,
    int thread_idx,
    FastDivmod const &stride_h_divmod, FastDivmod const &stride_w_divmod,
    int start_r, int start_s,
    MatrixCoord const &threadblock_offset = MatrixCoord()       // threadblock offset - units are whole CTA tiles
  ):
    params_(params),
    problem_size_(problem_size),
    pointer_(reinterpret_cast<char const *>(ptr)),
    filter_k_(0),
filter_r_(start_r), filter_s_(start_s), start_r_(start_r), start_s_(start_s) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); filter_k_ = threadblock_offset.column() + thread_coord.contiguous(); int filter_r = filter_r_; int filter_s = filter_s_; if (problem_size_.mode == Mode::kConvolution) { filter_r = (problem_size_.R - 1 - filter_r); filter_s = (problem_size_.S - 1 - filter_s); } // Starting h, w positions for filter position in gemm_k=0 int start_h, start_w; strided_dgrad_starting_coords( problem_size_, stride_h_divmod, stride_w_divmod, filter_r, filter_s, start_h, start_w); // Effective P and Q for filter position required for remapping NHW rows int P = (problem_size_.H - start_h + problem_size_.stride_h - 1) / problem_size_.stride_h; int Q = (problem_size_.W - start_w + problem_size_.stride_w - 1) / problem_size_.stride_w; CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { int offset_npq = (threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided) % params_.tiled_rows_per_filter; // (STEP 1) [reorder NHW rows to start with same filter positions] offset_n_[s] = offset_npq / (P * Q); int residual = offset_npq % (P * Q); int p = (residual / Q); int q = (residual % Q); int mapped_h = (start_h + p * problem_size_.stride_h); int mapped_w = (start_w + q * problem_size_.stride_w); // Access (p, q) coordinates for Dy tensor and a filter position in gemm_k=0 // note that (h + pad_h - filter_r) and (w + pad_w - filter_s) are divisible // by stride_h and stride_w offset_p_[s] = (mapped_h + problem_size_.pad_h - filter_r) / problem_size_.stride_h; offset_q_[s] = (mapped_w + problem_size_.pad_w - filter_s) / problem_size_.stride_w; } } CUTLASS_HOST_DEVICE static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) { return Params(problem_size, layout, sizeof_bits<Element>::value, {Shape::kRow, Shape::kColumn}); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_vector_ = index % kAccessesPerVector; int residual_access = index / kAccessesPerVector; iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_HOST_DEVICE void advance() { // Move filter_s by stride_w filter_s_ += problem_size_.stride_w; if (filter_s_ < problem_size_.S) { return; } // Restore filter_s filter_s_ = start_s_; // Move filter_r by stride_h filter_r_ += problem_size_.stride_h; if (filter_r_ < problem_size_.R) { return; } // Restore filter_r filter_r_ = start_r_; // Move filter_k filter_k_ += Shape_::kColumn * problem_size_.split_k_slices; } /// Returns the coordinate in the output tensor Dy that is currently pointed to /// by the iterator. CUTLASS_HOST_DEVICE TensorCoord at() const { int n = offset_n_[iteration_strided_]; int p = offset_p_[iteration_strided_]; int q = offset_q_[iteration_strided_]; int conv_sign = (problem_size_.mode == Mode::kConvolution ? 
      1 : -1);

    p += (conv_sign * (filter_r_ / problem_size_.stride_h));
    q += (conv_sign * (filter_s_ / problem_size_.stride_w));

    int k = filter_k_ + iteration_vector_ * AccessType::kElements;

    return TensorCoord(
      n,
      p,
      q,
      k);
  }

  /// Returns true if the current coordinate is within the output tensor Dy
  CUTLASS_HOST_DEVICE
  bool valid() const {

    TensorCoord coord = at();

    return coord.n() < problem_size_.N &&
      coord.h() >= 0 && coord.h() < problem_size_.P &&
      coord.w() >= 0 && coord.w() < problem_size_.Q &&
      coord.c() < problem_size_.K;
  }

  /// Returns a pointer to the vector starting at the current coordinate
  CUTLASS_HOST_DEVICE
  AccessType const *get() const {

    TensorCoord coord = at();
    LongIndex offset = params_.layout(coord);

    return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
  }

  /// Increments to the next memory access
  CUTLASS_HOST_DEVICE
  Conv2dDgradOutputGradientTileAccessIteratorAnalytic &operator++() {
    ++iteration_vector_;
    if (iteration_vector_ < kAccessesPerVector) {
      return *this;
    }
    iteration_vector_ = 0;

    ++iteration_contiguous_;
    if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
      return *this;
    }
    iteration_contiguous_ = 0;

    ++iteration_strided_;
    if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
      return *this;
    }
    iteration_strided_ = 0;

    return *this;
  }

  /// Determines whether the Implicit GEMM can execute the given problem.
  CUTLASS_HOST_DEVICE
  static Status can_implement(Conv2dProblemSize const &problem_size) {

    // check alignment constraint on iterator's contiguous dimension
    if (problem_size.K % AccessType::kElements) {
      return Status::kErrorInvalidProblem;
    }

    return Status::kSuccess;
  }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

// Conv2dDgradOutputGradientTileAccessIteratorAnalytic for unity strides can be optimized by
// eliminating modulo arithmetic to compute unscaled coordinates
template <
  typename Shape_,
  typename Element_,
  typename ThreadMap_,
  typename AccessType_
>
class Conv2dDgradOutputGradientTileAccessIteratorAnalytic <
  Shape_,
  Element_,
  ThreadMap_,
  conv::StrideSupport::kUnity,
  AccessType_
> {
public:

  //
  // Types
  //

  using Shape = Shape_;
  using Element = Element_;
  using Layout = layout::TensorNHWC;
  using ThreadMap = ThreadMap_;
  using AccessType = AccessType_;
  using TensorRef = cutlass::TensorRef<Element, Layout>;
  using TensorCoord = typename Layout::TensorCoord;
  using Index = typename Layout::Index;
  using LongIndex = typename Layout::LongIndex;
  static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic;
  static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity;
  static int const kConvDim = 2;
  using ConvProblemSize = typename conv::Conv2dProblemSize;
  static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;

  static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
    "Vectors implied by the thread map must be divisible by the access type.");

  static_assert(sizeof_bits<Element>::value >= 8,
    "DGRAD requires elements of size 8b or greater.");

  //
  // Simplifying assertions
  //
  static_assert(ThreadMap::Iterations::kContiguous == 1,
    "Require Iterations::kContiguous == 1");

  //
  // Parameters structure
  //

  struct Params {

    Layout layout;

    //
    // Methods
    //

    CUTLASS_HOST_DEVICE
    Params() { }

    CUTLASS_HOST_DEVICE
    Params(
      Conv2dProblemSize const &problem_size,
      Layout const &layout
    ): layout(layout) {

    }
  };

private:

  Params const &params_;
  Conv2dProblemSize const &problem_size_;
  LongIndex iteration_contiguous_;
LongIndex iteration_strided_; LongIndex iteration_vector_; char const *pointer_; int filter_k_; int filter_r_; int filter_s_; int offset_n_[ThreadMap::Iterations::kStrided]; int offset_w_[ThreadMap::Iterations::kStrided]; int offset_h_[ThreadMap::Iterations::kStrided]; public: CUTLASS_HOST_DEVICE Conv2dDgradOutputGradientTileAccessIteratorAnalytic( Params const &params, Conv2dProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() // threadblock offset - units are whole CTA tiles ): params_(params), problem_size_(problem_size), pointer_(reinterpret_cast<char const *>(ptr)), filter_k_(0), filter_r_(0), filter_s_(0) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); filter_k_ = threadblock_offset.column() + thread_coord.contiguous(); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { int offset_nhw = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided; offset_n_[s] = offset_nhw / (problem_size_.H * problem_size_.W); int residual = offset_nhw % (problem_size_.H * problem_size_.W); offset_h_[s] = residual / problem_size_.W; offset_w_[s] = residual % problem_size_.W; } } CUTLASS_HOST_DEVICE static Params getParams(Conv2dProblemSize const &problem_size, Layout const &layout) { return Params(problem_size, layout); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_vector_ = index % kAccessesPerVector; int residual_access = index / kAccessesPerVector; iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_HOST_DEVICE void advance() { // move to the next tile ++filter_s_; if (filter_s_ < problem_size_.S) { return; } filter_s_ = 0; ++filter_r_; if (filter_r_ < problem_size_.R) { return; } filter_r_ = 0; filter_k_ += Shape_::kColumn * problem_size_.split_k_slices; } /// Returns the coordinate in the output tensor Dy that is currently pointed to /// by the iterator. 
  CUTLASS_HOST_DEVICE
  TensorCoord at() const {

    int n = offset_n_[iteration_strided_];
    int h = offset_h_[iteration_strided_];
    int w = offset_w_[iteration_strided_];

    int r = filter_r_;
    int s = filter_s_;

    if (problem_size_.mode == Mode::kConvolution) {
      r = (problem_size_.R - 1 - r);
      s = (problem_size_.S - 1 - s);
    }

    int p = (h + problem_size_.pad_h - r * problem_size_.dilation_h) / problem_size_.stride_h;
    int q = (w + problem_size_.pad_w - s * problem_size_.dilation_w) / problem_size_.stride_w;

    int k = filter_k_ + iteration_vector_ * AccessType::kElements;

    return TensorCoord(n, p, q, k);
  }

  /// Returns true if the current coordinate is within the output tensor Dy
  CUTLASS_HOST_DEVICE
  bool valid() const {

    TensorCoord coord = at();

    return coord.n() < problem_size_.N &&
      coord.h() >= 0 && coord.h() < problem_size_.P &&
      coord.w() >= 0 && coord.w() < problem_size_.Q &&
      coord.c() < problem_size_.K;
  }

  /// Returns a pointer to the vector starting at the current coordinate
  CUTLASS_HOST_DEVICE
  AccessType const *get() const {

    TensorCoord coord = at();
    LongIndex offset = params_.layout(coord);

    return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8);
  }

  /// Increments to the next memory access
  CUTLASS_HOST_DEVICE
  Conv2dDgradOutputGradientTileAccessIteratorAnalytic &operator++() {
    ++iteration_vector_;
    if (iteration_vector_ < kAccessesPerVector) {
      return *this;
    }
    iteration_vector_ = 0;

    ++iteration_contiguous_;
    if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
      return *this;
    }
    iteration_contiguous_ = 0;

    ++iteration_strided_;
    if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
      return *this;
    }
    iteration_strided_ = 0;

    return *this;
  }

  /// Determines whether the Implicit GEMM can execute the given problem.
  CUTLASS_HOST_DEVICE
  static Status can_implement(Conv2dProblemSize const &problem_size) {

    // Conv2dDgradOutputGradientTileAccessIteratorAnalytic unity stride specialization
    // only supports (stride_h, stride_w) = (1, 1)
    if (problem_size.stride() != MatrixCoord({1, 1})) {
      return Status::kErrorNotSupported;
    }

    // check alignment constraint on iterator's contiguous dimension
    if (problem_size.K % AccessType::kElements) {
      return Status::kErrorInvalidProblem;
    }

    return Status::kSuccess;
  }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace threadblock
} // namespace conv
} // namespace cutlass

/////////////////////////////////////////////////////////////////////////////////////////////////
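//
// Usage sketch (illustrative only): instantiating the analytic Dgrad output-gradient iterator.
// The tile shape and element type below are example assumptions; the thread map is normally
// supplied by the default Dgrad kernel configuration and must satisfy
// ThreadMap::Iterations::kContiguous == 1.
//
// using ExampleShape   = cutlass::MatrixShape<64, 64>;   // (tile rows, tile columns) in elements (assumed)
// using ExampleElement = cutlass::half_t;
//
// using ExampleIterator = cutlass::conv::threadblock::
//     Conv2dDgradOutputGradientTileAccessIteratorAnalytic<
//         ExampleShape,
//         ExampleElement,
//         ExampleThreadMap,                               // assumed pitch-linear thread map
//         cutlass::conv::StrideSupport::kUnity>;          // unit-stride specialization
//
// During the mainloop, operator++() steps through the accesses of the current tile, advance()
// moves the iterator to the next gemm-k tile (next filter position), and valid() predicates
// each get() so out-of-bounds accesses to Dy are suppressed.
//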
include/cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_analytic.h/0
{ "file_path": "include/cutlass/conv/threadblock/conv2d_dgrad_output_gradient_tile_access_iterator_analytic.h", "repo_id": "include", "token_count": 6606 }
20