var searchData=
[
['half_5ft',['half_t',['../structcutlass_1_1half__t.html',1,'cutlass']]],
['hosttensor',['HostTensor',['../classcutlass_1_1HostTensor.html',1,'cutlass']]]
];
| docs/search/classes_7.js/0 | {
"file_path": "docs/search/classes_7.js",
"repo_id": "docs",
"token_count": 78
} | 0 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <vector>
#include <iostream>
// Cutlass command line parser
#include "cutlass/util/command_line.h"
class Options {
public:
bool help;
bool good;
std::vector<int> extent; ///< extent of tile to fill
std::vector<int> stride; ///< stride vector for layout function
std::vector<int> output_shape; ///< output shape
int vectorize; ///< sequences of consecutive output elements are concatenated into a vector
/// if, and only if, they were consecutive in source memory
public:
/// Options
Options():
help(false),
good(true),
extent({32, 8}),
stride({32}),
output_shape({16, 8}),
vectorize(1) {
}
/// Constructs from command line parser
Options(cutlass::CommandLine const & cmd_line): help(false), good(true) {
if (cmd_line.check_cmd_line_flag("help") ||
cmd_line.check_cmd_line_flag("h")) {
help = true;
}
if (cmd_line.check_cmd_line_flag("extent")) {
cmd_line.get_cmd_line_arguments("extent", extent);
}
else {
extent = {32, 8};
}
if (cmd_line.check_cmd_line_flag("stride")) {
cmd_line.get_cmd_line_arguments("stride", stride);
}
int default_output_shape[] = {16, 8};
if (cmd_line.check_cmd_line_flag("output-shape")) {
cmd_line.get_cmd_line_arguments("output-shape", output_shape);
}
for (int i = int(output_shape.size()); i < 2; ++i) {
output_shape.push_back(default_output_shape[i]);
}
if (cmd_line.check_cmd_line_flag("vectorize")) {
cmd_line.get_cmd_line_argument("vectorize", vectorize);
}
else {
vectorize = 1;
}
if (output_shape.front() % vectorize) {
std::cerr << "Error: --vectorize=" << vectorize
<< " must divide contiguous elements in --output-shape="
<< output_shape.at(0) << "," << output_shape.at(1) << std::endl;
good = false;
}
}
/// Prints usage statement
static void print_usage(std::ostream &out) {
out
<< " Options:\n"
<< " --help Displays this help message.\n"
<< " --extent=<extent> Specifies the layout-specific extent (as comma-delimited array).\n"
<< " --stride=<stride> Specifies the layout-specific stride vector (comma-delimited array)\n"
<< " --output-shape=<extent> Specifies the dimensions of a row-major output matrix. \n"
<< " --vectorize=<vector length> If possible, vectorizes the output into vectors of consecutive elements\n";
}
};
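//
// Hypothetical usage sketch (not part of the original example): shows how a driver program
// might combine this Options class with the CUTLASS command-line parser. The function name
// example_options_usage is illustrative only.
//
inline int example_options_usage(int argc, char const **argv) {
  cutlass::CommandLine cmd_line(argc, argv);   // wrap raw argv
  Options options(cmd_line);                   // populate Options from --extent/--stride/--output-shape/--vectorize
  if (options.help) {
    Options::print_usage(std::cout);
    return 0;
  }
  if (!options.good) {
    return -1;                                 // e.g. --vectorize does not divide --output-shape
  }
  // ... visualize the layout described by options.extent / options.stride ...
  return 0;
}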
| examples/03_visualize_layout/options.h/0 | {
"file_path": "examples/03_visualize_layout/options.h",
"repo_id": "examples",
"token_count": 1586
} | 1 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run convolution kernels using functions and data structures
provided by CUTLASS with Tensor Cores, targeting an NVIDIA Turing GPU.
Writing a single high-performance convolution kernel is hard but doable, whereas writing
high-performance kernels at scale that work for multiple problem sizes with good abstractions is
much harder. CUTLASS solves this problem by providing simplified abstractions to compose the
sections of an implicit GEMM kernel. When used properly, these kernels can approach the peak
performance of the GPU.
CUTLASS divides a kernel into hierarchical, composable sections. At each of the thread, warp,
and threadblock levels, the kernel computes on its own tile size, with higher-level tiles being
composed from lower-level ones. Multiple thread tiles (the tile each thread computes) form a
warp tile (the tile each warp computes), and multiple warp tiles form a threadblock tile
(the tile computed by a threadblock).
In this example, we split variable initialization into two parts:
1. Setting up data properties: how tensors are laid out in memory and how the kernel
can view them (logical-to-physical mapping).
2. Setting up computation properties: how the tensors set up above are used to compute the
output of the convolution.
First, we set up the data types of the input tensor A, the weight tensor B, and the output tensor C,
along with alpha and beta, since the equation for convolution is C = alpha * Conv(A, B) + beta * C. In CUTLASS,
the kernels first compute Conv(A, B) and defer the rest of the computation to the end of the kernel, because
alpha * X + beta * C is a simple element-wise operation on X (= Conv(A, B)) and C. We call this the
epilogue of the kernel. Hence, we set the data type of alpha and beta to
ElementComputeEpilogue = float. We want to use MMA instructions on Turing, and they support 4-bit
signed integers. Since 4-bit integers are not a native C++ type, CUTLASS provides
cutlass::int4b_t, which we use as the data type for the elements of input tensors A and B. We
convey this to the CUTLASS kernel by initializing the template variables ElementAccumulator (int32_t),
ElementComputeEpilogue (float), ElementInputA (cutlass::int4b_t), ElementInputB (cutlass::int4b_t),
and ElementOutput (cutlass::int4b_t). Communicating just the data types is not enough. Since data is laid
out linearly in memory, we also have to convey the layout of each tensor. We do that by setting the template
variables LayoutInputA, LayoutInputB, and LayoutOutput to cutlass::layout::TensorNHWC. Next, we set up the
rules to compute alpha * X + beta * C, which is called the epilogue of the kernel. We initialize the template
variable EpilogueOp, which takes the data type of the output ElementOutput, the number of
elements per vectorized memory access (8 in this example), the data type of the accumulator (int32_t), and
the data type used to compute the linear combination (alpha * X + beta * C).
Now that we have set up the properties of the data, we have to set up the properties of the computation.
Second, we create template variables for the tile sizes of the threadblock, warp, and MMA op: 128x128x128,
64x64x128, and 8x8x32 (MxNxK), respectively. When these are passed to instantiate the CUTLASS implicit GEMM
kernel, it internally deduces the number of threads needed per threadblock, the amount of shared memory, a
bank-conflict-free data layout, and the many other parameters required to compose, initialize, and
launch a high-performance implicit GEMM kernel. This is the beauty of CUTLASS: it relieves the developer
from understanding and coding complicated hardware optimizations, which can easily go wrong.
CUTLASS also supports multiple MMA pipelines in a threadblock. What is an MMA pipeline? An MMA pipeline
covers the whole process of loading input data from global memory to shared memory, loading data
from shared memory to registers, performing the matrix multiplication, and storing the result to global
memory. The flow below shows a typical MMA pipeline:
tensor in global memory -> registers -> tile in shared memory -> registers -> mma -> registers ->
output to global memory
The problem with a single pipeline is that each stage is synchronous, which means each stage has to wait
until the previous one has finished executing. Some stages in the pipeline do not have fixed
latency, for example the loads from global memory and shared memory. Therefore, we can add a second,
phase-shifted pipeline in the MMA kernel to hide the latency of the global and shared memory loads.
With two stages, the pipelines in a kernel look like:
(1) tensor in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5)
mma -> (6) registers -> (7) output to global memory
(1) <null> -> (2) <null> -> (3) tensor in global memory -> (4) registers -> (5) tile in shared
memory -> (6) registers -> (7) mma -> (8) registers -> (9) output to global memory
This way, you can hide the latency of the second pipeline's global memory load by computing on
input data that has already been loaded.
A few more template variables are initialized, such as the swizzle that decides which threadblock tile
of the output matrix is computed by which threadblock launched on an SM, and the CUDA SM architecture
of the GPU you want to run on.
These are all put together to create a template type that describes the CUTLASS implicit GEMM
kernel, using the cutlass::conv::device::ImplicitGemmConvolution template.
The next step is to initialize the physical data, then instantiate and initialize the CUTLASS kernel and
run it. We use CUTLASS utilities to initialize, fill, and compare tensors, as they are simple and do not
get in the way of learning CUTLASS.
Once all the tensors are initialized and filled with data, we create the arguments tuple used to launch the
CUTLASS kernel, which takes the problem size (N, H, W, C), the filter size (K, R, S, C),
padding, strides, dilation, the tensors, alpha, beta, and, importantly, the split-K factor.
Along with that, we query CUTLASS for any scratch-space memory required by the kernel we instantiated.
If some is required, we allocate it and pass it along with the other arguments used to initialize the
CUTLASS kernel; then the kernel is launched.
In this example, we later launch a reference convolution kernel (from the CUTLASS utilities) to
check that the output from the CUTLASS kernel matches the reference implicit GEMM computation.
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = int32_t; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::int4b_t; // Data type of elements in input tensor
using ElementInputB = cutlass::int4b_t; // Data type of elements in input tensor
using ElementOutput = cutlass::int4b_t; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm75;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 2;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationClamp<
ElementOutput, // Data type of output matrix.
8, // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAddSaturate,
cutlass::conv::IteratorAlgorithm::kAnalytic
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(false),
measure_performance(true),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of int4b_t elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 32 elements.
//
int const kAlignment = 32;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "09_turing_tensorop_conv2dfprop example\n\n"
<< " This example uses Turing's Tensor Core operators on int4 data types to compute\n"
<< " forward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
<< "$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
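// Worked example for the default problem size (N=1, H=W=32, C=32, K=32, R=S=3, pad=1, stride=1):
//   NPQK  = 1 * 32 * 32 * 32        = 32,768 output elements
//   fmas  = 32,768 * (3 * 3 * 32)   = 9,437,184 multiply-adds
//   flops = 2 * 9,437,184           = 18,874,368 (~0.019 GFLOP per pass)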
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_c(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C on host with zeros
cutlass::reference::host::TensorFill(
tensor_c.host_view());
// Fill tensor C for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_c.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_ref_c.sync_device();
//
// Define arguments for CUTLASS Convolution
//
// mode (kCrossCorrelation or kConvolution)
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split the K dimension into 1 partition (i.e., no split-K)
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices);
// Construct ImplicitGemm::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_c.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on host...\n";
// Compute with reference implementation
cutlass::reference::host::Conv2dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
ElementOutput,
cutlass::NumericConverterClamp<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_a.host_ref(),
tensor_b.host_ref(),
tensor_c.host_ref(),
tensor_ref_c.host_ref(),
options.alpha,
options.beta
);
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_c.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_c.host_view(),
tensor_ref_c.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "09_tensor_conv_workspace_conv2dfprop_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_c.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
// Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2.
//
// CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
return 0;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major > 7 || (props.major == 7 && props.minor >= 5))) {
std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75."
<< std::endl;
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {1, 32, 64, 128, 256, 512};
struct Benchmark {
int h, w, c, k, r, s;
} layers[] = {
{56, 56, 64, 256, 1, 1},
{56, 56, 64, 64, 1, 1},
{56, 56, 64, 64, 3, 3},
{56, 56, 256, 64, 1, 1},
{56, 56, 256, 512, 1, 1},
{56, 56, 256, 128, 1, 1},
{28, 28, 128, 128, 3, 3},
{28, 28, 128, 512, 1, 1},
{28, 28, 512, 128, 1, 1},
{28, 28, 512, 1024, 1, 1},
{28, 28, 512, 256, 1, 1},
{14, 14, 256, 256, 3, 3},
{14, 14, 256, 1024, 1, 1},
{14, 14, 1024, 256, 1, 1},
{14, 14, 1024, 2048, 1, 1},
{14, 14, 1024, 512, 1, 1},
{7, 7, 512, 512, 3, 3},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/09_turing_tensorop_conv2dfprop/turing_tensorop_conv2dfprop.cu/0 | {
"file_path": "examples/09_turing_tensorop_conv2dfprop/turing_tensorop_conv2dfprop.cu",
"repo_id": "examples",
"token_count": 10022
} | 2 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "device/b2b_implicit_gemm_convolution.h"
#include "b2b_conv2d_run.h"
#include "test_run.h"
////////////////////////////////////////////////////////////////////////////////
cutlass::conv::Conv2dProblemSize conv2d_f16_sm75_problem_size_0 (
{32, 56, 56, 64}, // input size (NHWC)
{64, 3, 3, 64}, // filter size (KRSC)
{1, 1, 1, 1}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
{32, 56, 56, 64} // output size (NPQK)
);
cutlass::conv::Conv2dProblemSize conv2d_f16_sm75_problem_size_1 (
{32, 56, 56, 64}, // input size (NHWC)
{128, 1, 1, 64}, // filter size (KRSC)
{0, 0, 0, 0}, // padding (pad_h, _, pad_w, _)
{1, 1}, // stride (stride_h, stride_w)
{1, 1}, // dilation (dilation_h, dilation_w)
{32, 56, 56, 128} // output size (NPQK)
);
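// Note: the output of the first convolution ({32, 56, 56, 64}, NPQK) matches the input (NHWC)
// of the second convolution, so the two fprops can be chained back-to-back. In the fused variant
// below, the intermediate activation stays resident in registers (RF) rather than making a
// round trip through global memory, as in the non-fused version.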
bool run_nonfused_conv2d_fprop_optimized_f16_sm75() {
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
ElementCompute alpha0 = ElementCompute(1);
ElementCompute beta0 = ElementCompute(1); //beta=1 for bias
ElementCompute alpha1 = ElementCompute(1);
ElementCompute beta1 = ElementCompute(1); //beta=1 for bias
using ThreadblockShape0 = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape0 = cutlass::gemm::GemmShape<32, 32, 32>;
using ThreadblockShape1 = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape1 = cutlass::gemm::GemmShape<64, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Conv2dFpropKernel0 = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementA, cutlass::layout::TensorNHWC,
ElementB, cutlass::layout::TensorNHWC,
ElementC, cutlass::layout::TensorNHWC,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm75,
ThreadblockShape0,
WarpShape0,
InstructionShape,
cutlass::epilogue::thread::LinearCombinationRelu<
ElementC,
128 / cutlass::sizeof_bits<ElementC>::value,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
2,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;
using Conv2dFprop0 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel0>;
using Conv2dFpropKernel1 = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementA, cutlass::layout::TensorNHWC,
ElementB, cutlass::layout::TensorNHWC,
ElementC, cutlass::layout::TensorNHWC,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm75,
ThreadblockShape1,
WarpShape1,
InstructionShape,
cutlass::epilogue::thread::LinearCombinationRelu<
ElementC,
128 / cutlass::sizeof_bits<ElementC>::value,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
2,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;
using Conv2dFprop1 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel1>;
B2bNonFusedConv2dRun<Conv2dFprop0, Conv2dFprop1> nonFusedConv2d;
std::cout << "Running Non-fused back-to-back FP16 Optimized Convolution Fprops...\n";
bool pass = nonFusedConv2d.run(conv2d_f16_sm75_problem_size_0, conv2d_f16_sm75_problem_size_1, cutlass::conv::SplitKMode::kSerial,
alpha0, beta0, alpha1, beta1);
if(pass)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return pass;
}
bool run_fused_conv2d_fprop_optimized_f16_sm75_rf_res() {
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using ElementAccumulator = cutlass::half_t;
using ElementCompute = cutlass::half_t;
ElementCompute alpha0 = ElementCompute(1);
//Fused kernel has built-in bias, setting beta=0
ElementCompute beta0 = ElementCompute(0);
ElementCompute alpha1 = ElementCompute(1);
ElementCompute beta1 = ElementCompute(1); //use beta for bias
using ThreadblockShape0 = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape0 = cutlass::gemm::GemmShape<16, 64, 32>;
using ThreadblockShape1 = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape1 = cutlass::gemm::GemmShape<16, 128, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using EpilogueOutputOp0 =
cutlass::epilogue::thread::LinearCombinationRelu<
ElementC,
InstructionShape::kM * InstructionShape::kN / 32,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>;
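// Note: InstructionShape::kM * InstructionShape::kN / 32 = 16 * 8 / 32 = 4, i.e. each of the 32
// threads in a warp holds 4 accumulator elements per m16n8k8 MMA tile. Using this as the epilogue
// vector width lets the first convolution's output remain in registers for the fused kernel.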
using EpilogueOutputOp1 =
cutlass::epilogue::thread::LinearCombinationRelu<
ElementC,
128 / cutlass::sizeof_bits<ElementC>::value,
ElementAccumulator,
ElementCompute,
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>;
const bool SmemAccumulator = false;
using B2bConv2dFpropKernel = typename cutlass::conv::kernel::DefaultB2bConv2dFprop<
ElementA, cutlass::layout::TensorNHWC,
ElementB, cutlass::layout::TensorNHWC,
ElementC, cutlass::layout::TensorNHWC,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm75,
ThreadblockShape0,
ThreadblockShape1,
WarpShape0,
WarpShape1,
InstructionShape,
EpilogueOutputOp0,
EpilogueOutputOp1,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>,
2,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::IteratorAlgorithm::kOptimized,
SmemAccumulator
>::Kernel;
using B2bConv2dFprop = cutlass::conv::device::B2bImplicitGemmConvolution<B2bConv2dFpropKernel>;
B2bFusedConv2dRun<B2bConv2dFprop> fusedConv2d;
std::cout << "Running Fused back-to-back FP16 Optimized Convolution Fprops with RF Residency...\n";
bool pass = fusedConv2d.run(conv2d_f16_sm75_problem_size_0, conv2d_f16_sm75_problem_size_1, cutlass::conv::SplitKMode::kSerial,
alpha0, beta0, alpha1, beta1);
if(pass)
std::cout << "Pass\n";
else
std::cout << "Fail\n";
return pass;
}
int main() {
std::vector<bool (*)()>funcs = {
&run_nonfused_conv2d_fprop_optimized_f16_sm75,
&run_fused_conv2d_fprop_optimized_f16_sm75_rf_res
};
return testRun(75, funcs, "conv f16 RF residency");
}
////////////////////////////////////////////////////////////////////////////////
| examples/13_two_tensor_op_fusion/fused_two_convs_f16_sm75_rf.cu/0 | {
"file_path": "examples/13_two_tensor_op_fusion/fused_two_convs_f16_sm75_rf.cu",
"repo_id": "examples",
"token_count": 3348
} | 3 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to call a CUTLASS SYRK kernel and provides a naive reference
matrix multiply kernel to verify its correctness.
The CUTLASS SYRK template is instantiated in the function CutlassSsyrkNN. This kernel computes
the symmetric rank-k update (SYRK) using double-precision floating-point arithmetic and assumes
all matrices have column-major layout.
The threadblock tile size is chosen as 16x32x16 which offers good performance for large matrices.
See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available
in CUTLASS.
https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/
Aside from defining and launching the SSYRK kernel, this example does not use any other components
or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are
prevalent in the CUTLASS unit tests.
*/
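//
// For reference, the operation computed here is the BLAS SYRK update
//
//   C := alpha * A * A^T + beta * C
//
// where A is an N-by-K column-major matrix and only the lower triangle of the N-by-N matrix C
// is read and written (FillMode::kLower), matching the reference kernel later in this file.
//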
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// Helper methods to check for errors
#include "helper.h"
//
// CUTLASS includes needed for double-precision SYRK kernel
//
// Defines cutlass::gemm::device::RankK, the generic rank-k update (SYRK) computation template class.
#include "cutlass/gemm/device/rank_k.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS SYRK kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS SYRK template and launch a SYRK kernel.
cudaError_t CutlassSsyrkNN(
int N,
int K,
double alpha,
double const *A,
int lda,
double beta,
double *C,
int ldc) {
// Define a type alias for a double-precision CUTLASS SYRK with column-major
// input matrices and a 16x32x16 threadblock tile size (the default chosen for this example).
//
// To keep the interface manageable, several helpers are defined for plausible compositions
// including the following example for double-precision SYRK. Typical values are used as
// default template arguments.
//
// To view the full rank-k update device API interface, see `cutlass/gemm/device/rank_k.h`
using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassSyrk = cutlass::gemm::device::RankK<
double,
ColumnMajor,
double,
ColumnMajor,
cutlass::FillMode::kLower,
double,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<16, 32, 16>,
cutlass::gemm::GemmShape<16, 16, 16>,
cutlass::gemm::GemmShape<8, 8, 4>,
cutlass::epilogue::thread::LinearCombination<
double,
1,
double,
double
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>,
5, // Stages
1, // AlignmentA
false, // SplitKSerial
cutlass::arch::OpMultiplyAdd,
cutlass::ComplexTransform::kNone,
cutlass::BlasMode::kSymmetric
>;
// Define a CUTLASS SYRK type
CutlassSyrk syrk_operator;
// Construct the CUTLASS SYRK arguments object.
//
// One of CUTLASS's design patterns is to define syrk argument objects that are constructible
// in host code and passed to kernels by value. These may include pointers, strides, scalars,
// and other arguments needed by Syrk and its components.
//
// The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible
// arguments to kernels and (2.) minimized initialization overhead on kernel entry.
//
CutlassSyrk::Arguments args(cutlass::gemm::GemmUniversalMode::kGemm,
{N, N, K}, // Syrk Problem dimensions
1, // batch_count,
{alpha, beta}, // Scalars used in the Epilogue
reinterpret_cast<void const *>(A),
const_cast<void *>(reinterpret_cast<void *>(C)),
reinterpret_cast<void *>(C), // destination matrix D (may be different memory than source C matrix)
(int64_t)N*K, // Batch strides
(int64_t)N*N,
(int64_t)N*N,
lda,
ldc,
ldc);
//
// Launch the CUTLASS SYRK kernel.
//
cutlass::Status status = syrk_operator(args);
//
// Return a cudaError_t if the CUTLASS SYRK operator returned an error code.
//
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
// Return success, if no errors were encountered.
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// The source code after this point in the file is generic CUDA using the CUDA Runtime API
// and simple CUDA kernels to initialize matrices and compute the general matrix product.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize a matrix with small integers.
__global__ void InitializeMatrix_kernel(
double *matrix,
int ldm,
int rows,
int columns,
int seed = 0) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < columns) {
int offset = i + j * ldm;
// Generate arbitrary elements.
int const k = 16807;
int const m = 16;
double value = double(((offset + seed) * k % m) - m / 2);
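// For the modest matrix sizes used in this example, (offset + seed) * k stays within int range,
// so the expression above yields small integers in [-8, 7], all exactly representable in double.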
matrix[offset] = value;
}
}
/// Simple function to initialize a matrix to arbitrary small integers.
cudaError_t InitializeMatrix(double *matrix, int ldm, int rows, int columns, int seed = 0) {
dim3 block(16, 16);
dim3 grid(
(rows + block.x - 1) / block.x,
(columns + block.y - 1) / block.y
);
InitializeMatrix_kernel<<< grid, block >>>(matrix, ldm, rows, columns, seed);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates device memory for a matrix then fills with arbitrary small integers.
cudaError_t AllocateMatrix(double **matrix, int ldm, int rows, int columns, int seed = 0) {
cudaError_t result;
size_t sizeof_matrix = sizeof(double) * ldm * columns;
// Allocate device memory.
result = cudaMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to allocate matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = cudaMemset(*matrix, 0, sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Initialize matrix elements to arbitrary small integers.
result = InitializeMatrix(*matrix, ldm, rows, columns, seed);
if (result != cudaSuccess) {
std::cerr << "Failed to initialize matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Naive reference SYRK computation.
__global__ void ReferenceSyrk_kernel(
int N,
int K,
double alpha,
double const *A,
int lda,
double beta,
double *C,
int ldc) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < N && j < N && i >= j ) { // Since C is in Lower Fill Mode
double accumulator = 0;
for (int k = 0; k < K; ++k) {
accumulator += A[i + k * lda] * A[j + k * lda];
}
C[i + j * ldc] = alpha * accumulator + beta * C[i + j * ldc];
}
}
/// Reference SYRK computation.
cudaError_t ReferenceSyrk(
int N,
int K,
double alpha,
double const *A,
int lda,
double beta,
double *C,
int ldc) {
dim3 block(16, 16);
dim3 grid(
(N + block.x - 1) / block.x,
(N + block.y - 1) / block.y
);
ReferenceSyrk_kernel<<< grid, block >>>(N, K, alpha, A, lda, beta, C, ldc);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a double-precision
/// CUTLASS SYRK kernel.
cudaError_t TestCutlassSyrk(int N, int K, double alpha, double beta) {
cudaError_t result;
//
// Define several matrices to be used as operands to SYRK kernels.
//
// Compute leading dimensions for each matrix.
int lda = N;
int ldc = N;
// Compute size in bytes of the C matrix.
size_t sizeof_C = sizeof(double) * ldc * N;
// Define pointers to matrices in GPU device memory.
double *A;
double *C_cutlass;
double *C_reference;
//
// Allocate matrices in GPU device memory with arbitrary seeds.
//
result = AllocateMatrix(&A, lda, N, K, 0);
if (result != cudaSuccess) {
return result;
}
result = AllocateMatrix(&C_cutlass, ldc, N, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
return result;
}
result = AllocateMatrix(&C_reference, ldc, N, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(C_cutlass);
return result;
}
result = cudaMemcpy(C_reference, C_cutlass, sizeof_C, cudaMemcpyDeviceToDevice);
if (result != cudaSuccess) {
std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
//
// Launch CUTLASS SYRK.
//
result = CutlassSsyrkNN(N, K, alpha, A, lda, beta, C_cutlass, ldc);
if (result != cudaSuccess) {
std::cerr << "CUTLASS SYRK kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
//
// Verify.
//
// Launch reference SYRK
result = ReferenceSyrk(N, K, alpha, A, lda, beta, C_reference, ldc);
if (result != cudaSuccess) {
std::cerr << "Reference SYRK kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
// Copy to host and verify equivalence.
std::vector<double> host_cutlass(ldc * N, 0);
std::vector<double> host_reference(ldc * N, 0);
result = cudaMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy CUTLASS SYRK results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
result = cudaMemcpy(host_reference.data(), C_reference, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy Reference SYRK results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
return result;
}
//
// Free device memory allocations.
//
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(A);
//
// Test for bit equivalence of results.
//
if (host_cutlass != host_reference) {
std::cerr << "CUTLASS results incorrect." << std::endl;
return cudaErrorUnknown;
}
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to basic_syrk example.
//
// usage:
//
// 00_basic_syrk <N> <K> <alpha> <beta>
//
int main(int argc, const char *arg[]) {
bool notSupported = false;
// CUTLASS must be compiled with CUDA 11 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "This example requires compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
//
// Parse the command line to obtain SYRK dimensions and scalar values.
//
// SYRK problem dimensions.
int problem[2] = { 128, 128 };
for (int i = 1; i < argc && i < 3; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Scalars used for linear scaling the result of the matrix product.
double scalars[2] = { 1, 0 };
for (int i = 3; i < argc && i < 5; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 3];
}
//
// Run the CUTLASS SYRK test.
//
cudaError_t result = TestCutlassSyrk(
problem[0], // SYRK N dimension
problem[1], // SYRK K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| examples/31_basic_syrk/basic_syrk.cu/0 | {
"file_path": "examples/31_basic_syrk/basic_syrk.cu",
"repo_id": "examples",
"token_count": 5258
} | 4 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief This file contains all of the functioning classes needed by the GemmLayernorm example.
GemmLayernorm example = GEMM0 with partial reduction fused in epilogue (EpilogueVisitorLayerNorm)
+ lightweight full reduction kernel (ApplyFinalReduction)
+ GEMM1 with elementwise operations fused in mainloop (GemmLayernormMainloopFusion)
*/
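//
// For context, the layer normalization being fused here computes, per normalized vector x of the
// GEMM0 output,
//
//   mean = E[x],  inv_std = 1 / sqrt(E[x^2] - E[x]^2 + eps),  y = (x - mean) * inv_std
//
// (eps is 1e-6 in the code below). ApplyFinalReduction folds this into the two values it stores
// per vector, inv_std and -mean * inv_std, so that a downstream kernel can apply
// y = x * inv_std + (-mean * inv_std) as a simple fused multiply-add.
//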
#pragma once
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <cmath>
#include <iostream>
#include <vector>
#include <limits>
#include "cutlass/cutlass.h"
#include "cutlass/arch/memory.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/device/gemm_layernorm_mainloop_fusion.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/epilogue/threadblock/epilogue_with_visitor.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "gemm_with_epilogue_visitor.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementVariance_,
typename ElementMean_,
typename ElementLayernormCompute_,
typename ElementOutput,
typename ThreadblockShape_,
bool IsShiftedVariance_ = false
>
class ApplyFinalReduction {
public:
using ElementVariance = ElementVariance_;
using ElementMean = ElementMean_;
using ElementLayernormCompute = ElementLayernormCompute_;
using ThreadblockShape = ThreadblockShape_;
// Pre-processing has ensured the layout is equivalent to RowMajor
using Layout = cutlass::layout::RowMajor;
using TensorVariance = TensorRef<ElementVariance, Layout>;
using TensorMean = TensorRef<ElementMean, Layout>;
static bool const kIsShiftedVariance = IsShiftedVariance_;
//
// Arguments
//
struct Arguments {
MatrixCoord extent; ///< Extent of D and Layernorm matrices
TensorVariance ref_Variance; ///< Sum Square or Variance tensor (input / output)
TensorMean ref_Mean; ///< Sum or Mean tensor (input / output)
ElementOutput *ptr_Shifted_K; ///< Shifted K tensor pointer
//
// Methods
//
Arguments(){ }
Arguments(
MatrixCoord extent_,
TensorVariance ref_Variance_,
TensorMean ref_Mean_,
ElementOutput *ptr_Shifted_K_
):
extent(extent_),
ref_Variance(ref_Variance_),
ref_Mean(ref_Mean_),
ptr_Shifted_K(ptr_Shifted_K_)
{
}
};
struct SharedStorage {
};
//
// Params struct
//
struct Params {
Arguments args;
//
// Methods
//
Params() { }
Params(Arguments const &args_): args(args_) { }
};
private:
public:
CUTLASS_DEVICE
ApplyFinalReduction() { }
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
apply(params, shared_storage);
}
private:
/// Full reduction over the per-threadblock partial sums
CUTLASS_DEVICE
void apply(Params const ¶ms, SharedStorage &shared_storage) {
int threadblock_num = (params.args.extent.column() + ThreadblockShape::kM - 1) / ThreadblockShape::kM;
int block_n = blockIdx.x * blockDim.x;
int thread_n = threadIdx.x;
int idx_n = block_n + thread_n;
if (idx_n >= params.args.extent.row()) {
return;
}
using ConvertVarianceOutput = cutlass::NumericConverter<ElementVariance, ElementLayernormCompute>;
using ConvertMeanOutput = cutlass::NumericConverter<ElementMean, ElementLayernormCompute>;
using ConvertVariance = cutlass::NumericConverter<ElementLayernormCompute, ElementVariance>;
using ConvertMean = cutlass::NumericConverter<ElementLayernormCompute, ElementMean>;
using ConvertShiftK = cutlass::NumericConverter<ElementLayernormCompute, ElementOutput>;
ConvertVariance convert_variance;
ConvertMean convert_mean;
ConvertVarianceOutput convert_variance_output;
ConvertMeanOutput convert_mean_output;
ElementVariance *access_square = params.args.ref_Variance.data() + idx_n;
ElementMean *access_mean = params.args.ref_Mean.data() + idx_n;
ElementVariance *access_square_bak = access_square;
ElementMean *access_mean_bak = access_mean;
ElementLayernormCompute frag_square_sum = ElementLayernormCompute(0);
ElementLayernormCompute frag_element_sum = ElementLayernormCompute(0);
ElementVariance fetch_square;
ElementMean fetch_mean;
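// Each GEMM0 threadblock wrote one partial (already scaled by 1/N) sum and sum-of-squares per
// output row; partials belonging to the same row are spaced extent.row() elements apart, so
// walk threadblock_num of them here and accumulate.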
CUTLASS_PRAGMA_UNROLL
for (int idx_m = 0; idx_m < threadblock_num; idx_m++) {
arch::global_load<ElementVariance, sizeof(ElementVariance)>(fetch_square, access_square, true);
arch::global_load<ElementMean, sizeof(ElementMean)>(fetch_mean, access_mean, true);
frag_element_sum += convert_mean(fetch_mean);
frag_square_sum += convert_variance(fetch_square);
access_square += params.args.extent.row();
access_mean += params.args.extent.row();
}
ElementLayernormCompute mean = frag_element_sum;
ElementLayernormCompute square_mean = frag_square_sum;
ElementLayernormCompute variance;
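// Shift-K trick: Var(X) = E[(X - K)^2] - (E[X] - K)^2 for any constant K, which is numerically
// safer than E[X^2] - E[X]^2 when |E[X]| is large relative to the variance.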
if (kIsShiftedVariance && params.args.ptr_Shifted_K != nullptr) {
ElementOutput *access_shift_k = params.args.ptr_Shifted_K + idx_n;
ElementOutput fetch_shift_k;
ConvertShiftK convert_shift_k;
arch::global_load<ElementOutput, sizeof(ElementOutput)>(fetch_shift_k, access_shift_k, true);
ElementLayernormCompute shifted_mean = mean - convert_shift_k(fetch_shift_k);
variance = cutlass::constants::one<ElementLayernormCompute>() / cutlass::fast_sqrt(square_mean - shifted_mean * shifted_mean + ElementLayernormCompute(1e-6));
}else{
variance = cutlass::constants::one<ElementLayernormCompute>() / cutlass::fast_sqrt(square_mean - mean * mean + ElementLayernormCompute(1e-6));
}
mean = -mean * variance;
access_square = access_square_bak;
access_mean = access_mean_bak;
access_square[0] = convert_variance_output(variance);
access_mean[0] = convert_mean_output(mean);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ThreadblockShape_,
int ThreadCount,
typename OutputTileIterator_,
typename AccumulatorTile_,
typename ElementAccumulator_,
typename ElementVariance_,
typename ElementMean_,
typename ElementLayernormCompute_,
typename ElementwiseFunctor_,
bool IsShiftedVariance_ = false
>
class EpilogueVisitorLayerNorm {
public:
using ElementVariance = ElementVariance_;
using ElementMean = ElementMean_;
using ElementLayernormCompute = ElementLayernormCompute_;
using AccumulatorTile = AccumulatorTile_;
using ThreadblockShape = ThreadblockShape_;
static int const kThreadCount = ThreadCount;
using OutputTileIterator = OutputTileIterator_;
using ElementwiseFunctor = ElementwiseFunctor_;
static int const kIterations = OutputTileIterator::kIterations;
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
static int const kRowIterations = OutputTileIterator::ThreadMap::Iterations::kRow;
static int const kThreads = OutputTileIterator::ThreadMap::kThreads;
static bool const kIsShiftedVariance = IsShiftedVariance_;
using ElementOutput = typename OutputTileIterator::Element;
static int const kDeltaRow = OutputTileIterator::ThreadMap::Delta::kRow;
/// Array type used in Shift-K Layernorm
static int const kRowAccessCount = kIterations * kRowIterations;
using ConvertedShiftFragment = Array<ElementLayernormCompute, kRowAccessCount>;
// Column-major output is handled by transposing the problem externally (already supported)
using LayoutOutput = cutlass::layout::RowMajor;
using ElementAccumulator = ElementAccumulator_;
using AccumulatorFragment = Array<ElementAccumulator, kElementsPerAccess>;
using LayernormFragment = Array<ElementLayernormCompute, kElementsPerAccess>;
using OutputVector = Array<ElementOutput, kElementsPerAccess>;
using TensorRefD = TensorRef<ElementOutput, LayoutOutput>;
static int const kThreadsPerRow = OutputTileIterator::ThreadMap::Detail::RowArrangement::Detail::kShapeWidth;
static int const kThreadsInColumn = kThreads / kThreadsPerRow;
static int const kHalfThreadsPerRow = (kThreadsPerRow >> 1);
/// Argument structure
struct Arguments {
typename ElementwiseFunctor::Params elementwise;
TensorRefD ref_C;
TensorRefD ref_D;
ElementVariance *ptr_Variance;
ElementMean *ptr_Mean;
ElementOutput *ptr_Shifted_K;
//
// Methods
//
Arguments():
ptr_Variance(nullptr),
ptr_Mean(nullptr),
ptr_Shifted_K(nullptr)
{
}
Arguments(
typename ElementwiseFunctor::Params elementwise_,
TensorRefD ref_C_,
TensorRefD ref_D_,
ElementVariance *ptr_Variance,
ElementMean *ptr_Mean_,
ElementOutput *ptr_Shifted_K_ = nullptr
):
elementwise(elementwise_),
ref_C(ref_C_),
ref_D(ref_D_),
ptr_Variance(ptr_Variance),
ptr_Mean(ptr_Mean_),
ptr_Shifted_K(ptr_Shifted_K_)
{
}
};
struct Params {
typename ElementwiseFunctor::Params elementwise;
typename OutputTileIterator::Params params_C;
typename OutputTileIterator::Params params_D;
typename OutputTileIterator::Element *ptr_C;
typename OutputTileIterator::Element *ptr_D;
ElementVariance *ptr_Variance;
ElementMean *ptr_Mean;
ElementOutput *ptr_Shifted_K;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params():
ptr_D(nullptr),
ptr_Variance(nullptr),
ptr_Mean(nullptr)
{
}
CUTLASS_HOST_DEVICE
Params(Arguments const &args):
elementwise(args.elementwise),
params_C(args.ref_C.layout()),
params_D(args.ref_D.layout()),
ptr_C(args.ref_C.data()),
ptr_D(args.ref_D.data()),
ptr_Variance(args.ptr_Variance),
ptr_Mean(args.ptr_Mean),
ptr_Shifted_K(args.ptr_Shifted_K)
{
}
};
/// Shared storage
struct SharedStorage {
};
private:
Params const & params_;
SharedStorage & shared_storage_;
MatrixCoord extent_;
ElementwiseFunctor elementwise_;
OutputTileIterator iterator_C_;
OutputTileIterator iterator_D_;
typename OutputTileIterator::Fragment fragment_C_;
typename OutputTileIterator::Fragment fragment_D_;
ElementAccumulator alpha_;
ElementAccumulator beta_;
ConvertedShiftFragment shift_k_frag_;
ElementLayernormCompute accum_sum_square_;
ElementLayernormCompute accum_sum_element_;
MatrixCoord thread_offset_;
public:
CUTLASS_DEVICE
EpilogueVisitorLayerNorm(
Params const ¶ms, ///< Parameters routed to the epilogue
SharedStorage &shared_storage, ///< Shared storage needed by the functors here
MatrixCoord const &problem_size0, ///< Problem size of the output
int thread_idx, ///< Thread index within the threadblock
int warp_idx, ///< Warp index within the threadblock
int lane_idx, ///< Lane index within the warp
MatrixCoord const &threadblock_offset = MatrixCoord(0, 0)
):
params_(params),
shared_storage_(shared_storage),
extent_(problem_size0),
elementwise_(params.elementwise),
iterator_C_(params.params_C, params.ptr_C, problem_size0, thread_idx, threadblock_offset),
iterator_D_(params.params_D, params.ptr_D, problem_size0, thread_idx, threadblock_offset)
{
alpha_ = (params.elementwise.alpha_ptr ? *params.elementwise.alpha_ptr : params.elementwise.alpha);
beta_ = (params.elementwise.beta_ptr ? *params.elementwise.beta_ptr : params.elementwise.beta);
if (beta_ == ElementAccumulator()) {
iterator_C_.clear_mask();
}
}
/// Helper to indicate split-K behavior
CUTLASS_DEVICE
void set_k_partition(
int split_k_index, ///< Index of this threadblock within split-K partitioned scheme
int split_k_slices) { ///< Total number of split-K slices
}
/// Called to set the batch index
CUTLASS_DEVICE
void set_batch_index(int batch_idx) {
}
/// Called at the start of the epilogue just before iterating over accumulator slices
CUTLASS_DEVICE
void begin_epilogue() {
// If shift-K feature is enabled, we load shift-k fragment
// at the very beginning of an epilogue
if (kIsShiftedVariance && params_.ptr_Shifted_K != nullptr) {
shift_k_frag_.clear();
int thread_offset_row_base = iterator_D_.thread_start_row();
CUTLASS_PRAGMA_UNROLL
for (int iter_idx = 0; iter_idx < kIterations; ++iter_idx) {
int step_offset = iter_idx * OutputTileIterator::Shape::kRow;
CUTLASS_PRAGMA_UNROLL
for (int rid = 0; rid < kRowIterations; ++rid) {
int row_step_offset = rid * kDeltaRow;
int row_offset = thread_offset_row_base + step_offset + row_step_offset;
bool is_load = (row_offset < extent_.row());
shift_k_frag_[iter_idx * kRowIterations + rid] = load_shift_k_(row_offset, is_load);
}
}
}
}
/// Called at the start of one step before starting accumulator exchange
CUTLASS_DEVICE
void begin_step(int step_idx) {
fragment_D_.clear();
if (elementwise_.kScale != cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling) {
fragment_C_.clear();
iterator_C_.load(fragment_C_);
++iterator_C_;
}
}
/// Called at the start of a row
CUTLASS_DEVICE
void begin_row(int row_idx) {
}
/// Called after accumulators have been exchanged for each accumulator vector
CUTLASS_DEVICE
void visit(
int iter_idx,
int row_idx,
int column_idx,
int frag_idx,
AccumulatorFragment const &accum) {
using Mul = cutlass::multiplies<ElementLayernormCompute>;
using Minus = cutlass::minus<ElementLayernormCompute>;
using Exp = cutlass::fast_exp_op<ElementLayernormCompute>;
[[maybe_unused]] Minus minus;
[[maybe_unused]] Mul mul;
[[maybe_unused]] Exp exponential;
LayernormFragment result;
thread_offset_ =
iterator_D_.thread_start() +
OutputTileIterator::ThreadMap::iteration_offset(frag_idx);
NumericArrayConverter<ElementLayernormCompute, ElementOutput, kElementsPerAccess> source_converter;
OutputVector &source_vector = reinterpret_cast<OutputVector *>(&fragment_C_)[frag_idx];
bool column_guard = (thread_offset_.column() < extent_.column());
if (elementwise_.kScale == cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling) {
result = source_converter(elementwise_(accum));
}else{
result = source_converter(elementwise_(accum, source_vector));
}
ElementLayernormCompute inv_scalar = cutlass::constants::one<ElementLayernormCompute>() / ElementLayernormCompute(extent_.column());
// The fragment is cleared for out-of-range columns, so no column-guard check is needed here.
accum_sum_element_ = element_sum_accumulator_(result);
// The square sum is different: out-of-range columns must be skipped when shift-K is enabled,
// otherwise each such column would add a spurious k^2 term to the square sum.
if (column_guard) {
accum_sum_square_ = (kIsShiftedVariance) ? \
square_sum_accumulator_(result, shift_k_frag_[iter_idx * kRowIterations + row_idx]) : \
square_sum_accumulator_(result);
}
else {
accum_sum_square_ = ElementLayernormCompute(0);
}
accum_sum_element_ *= inv_scalar;
accum_sum_square_ *= inv_scalar;
// After performing the in-thread reduction, we then perform cross-thread / in-warp reduction
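// This is a butterfly (XOR-shuffle) reduction across the kThreadsPerRow lanes that share one
// output row; after log2(kThreadsPerRow) steps every participating lane holds the row-wide sums.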
CUTLASS_PRAGMA_UNROLL
for (int i = kHalfThreadsPerRow; i > 0; i >>= 1) {
accum_sum_element_ += __shfl_xor_sync(0xFFFFFFFF, accum_sum_element_, i);
accum_sum_square_ += __shfl_xor_sync(0xFFFFFFFF, accum_sum_square_, i);
}
// Convert to the output
NumericArrayConverter<ElementOutput, ElementLayernormCompute, kElementsPerAccess> output_converter;
OutputVector &output = reinterpret_cast<OutputVector *>(&fragment_D_)[frag_idx];
output = output_converter(result);
}
/// Called at the end of a row
CUTLASS_DEVICE
void end_row(int row_idx) {
using ConvertVarianceOutput = cutlass::NumericConverter<ElementVariance, ElementLayernormCompute>;
using ConvertMeanOutput = cutlass::NumericConverter<ElementMean, ElementLayernormCompute>;
ConvertVarianceOutput convert_variance_output;
ConvertMeanOutput convert_mean_output;
bool is_write_thread = (thread_offset_.row() < extent_.row() && (threadIdx.x % kThreadsPerRow) == 0);
int row_offset = thread_offset_.row() + blockIdx.y * extent_.row();
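// One lane per row writes this threadblock's partial results; the blockIdx.y * extent_.row()
// term places partials from different threadblocks at the extent.row() stride that
// ApplyFinalReduction walks when it folds them together.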
ElementVariance *curr_ptr_sum_square = params_.ptr_Variance + row_offset;
ElementMean *curr_ptr_element_sum = params_.ptr_Mean + row_offset;
arch::global_store<ElementVariance, sizeof(ElementVariance)>(
convert_variance_output(accum_sum_square_),
(void *)curr_ptr_sum_square,
is_write_thread);
arch::global_store<ElementMean, sizeof(ElementMean)>(
convert_mean_output(accum_sum_element_),
(void *)curr_ptr_element_sum,
is_write_thread);
}
/// Called after all accumulator elements have been visited
CUTLASS_DEVICE
void end_step(int step_idx) {
iterator_D_.store(fragment_D_);
++iterator_D_;
}
/// Called after all steps have been completed
CUTLASS_DEVICE
void end_epilogue() {
}
private:
CUTLASS_DEVICE
ElementLayernormCompute load_shift_k_(int row_offset, bool is_load) {
using ConvertShiftK = cutlass::NumericConverter<ElementLayernormCompute, ElementOutput>;
ConvertShiftK convert_shift_k;
ElementOutput shift_k_val;
// Computes the address to load shift_k element
ElementOutput *curr_ptr_shift_k = params_.ptr_Shifted_K + row_offset;
// Conditionally loads from global memory
arch::global_load<ElementOutput, sizeof(ElementOutput)>(shift_k_val, (void *)curr_ptr_shift_k, is_load);
// Converts data type to return
ElementLayernormCompute converted_shift_k_val = convert_shift_k(shift_k_val);
return converted_shift_k_val;
}
CUTLASS_DEVICE
ElementLayernormCompute square_sum_accumulator_(LayernormFragment const &accum) {
ElementLayernormCompute sum_ = ElementLayernormCompute(0);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < LayernormFragment::kElements; ++i) {
auto accum_ = accum[i];
sum_ += accum_ * accum_;
}
return sum_;
}
CUTLASS_DEVICE
ElementLayernormCompute square_sum_accumulator_(LayernormFragment const &accum, ElementLayernormCompute shift_k_val) {
ElementLayernormCompute sum_ = ElementLayernormCompute(0);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < LayernormFragment::kElements; ++i) {
auto accum_ = accum[i] - shift_k_val;
sum_ += accum_ * accum_;
}
return sum_;
}
CUTLASS_DEVICE
ElementLayernormCompute element_sum_accumulator_(LayernormFragment const &accum) {
ElementLayernormCompute sum_ = ElementLayernormCompute(0);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < LayernormFragment::kElements; ++i) {
sum_ += accum[i];
}
return sum_;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
/////////////////////////////////////////////////////////////////////////////////////////////////
///
template <
typename ElementInputA0_,
typename LayoutInputA0_,
typename ElementInputB0_,
typename LayoutInputB0_,
typename ElementOutput_,
typename LayoutOutput_,
typename ElementCompute_,
typename EpilogueFunctorOp_,
typename ThreadblockShape_,
typename WarpShape_,
typename InstructionShape_,
int Stages0,
int Stages1,
bool IsShiftedVariance_ = false
>
class GemmLayernorm {
public:
///////////////////////////////////////////////////////////////////////////////////////////////
//
// Type definitions
//
static bool const kInternalTranspose = cutlass::platform::is_same<LayoutOutput_, cutlass::layout::ColumnMajor>::value;
static bool const kIsShiftedVariance = IsShiftedVariance_;
// This layout is mandatory.
using LayoutInputScaleBias = cutlass::layout::RowMajor;
// These are mandatory data types.
using ElementLayernormCompute = float;
using ElementInputScaleBias = cutlass::half_t;
// These are mandatory params required by mainloop fusion
using OperatorClass = cutlass::arch::OpClassTensorOp;
using ArchTag = cutlass::arch::Sm80;
// These are mandatory layouts and data types
// that are inherited from pre-defined params
using LayoutSumSqr = LayoutInputScaleBias;
using LayoutSum = LayoutInputScaleBias;
using ElementMean = ElementInputScaleBias;
using ElementVariance = ElementInputScaleBias;
///////////////////////////////////////////////////////////////////////////////////////////////
using LayoutInputA0 = LayoutInputA0_;
using LayoutInputB0 = LayoutInputB0_;
using LayoutInputA1 = LayoutOutput_;
using LayoutInputB1 = LayoutOutput_;
using LayoutOutputC0 = LayoutOutput_;
using LayoutOutputC1 = LayoutOutput_;
using ElementInputA0 = ElementInputA0_;
using ElementInputB0 = ElementInputB0_;
using ElementOutputC0 = ElementOutput_;
using ElementCompute = ElementCompute_;
using ElementInputB1 = ElementInputB0_;
using ElementInputA1 = ElementOutputC0;
using ElementOutputC1 = ElementOutputC0;
using EpilogueFunctorOp = EpilogueFunctorOp_;
using TensorRefA = TensorRef<ElementInputA0, LayoutInputA0>;
using TensorRefB = TensorRef<ElementInputB0, LayoutInputB0>;
using TensorRefC = TensorRef<ElementOutputC0, LayoutOutputC0>;
using TensorVariance = TensorRef<ElementVariance, LayoutSumSqr>;
using TensorMean = TensorRef<ElementMean, LayoutSum>;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
static int const kStages0 = Stages0;
static int const kStages1 = Stages1;
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
///////////////////////////////////////////////////////////////////////////////////////////////
using MapArguments = cutlass::gemm::kernel::detail::MapArguments<
ElementInputA0,
LayoutInputA0,
cutlass::ComplexTransform::kNone,
128 / cutlass::sizeof_bits<ElementInputA0>::value,
ElementInputB0,
LayoutInputB0,
cutlass::ComplexTransform::kNone,
128 / cutlass::sizeof_bits<ElementInputB0>::value,
LayoutOutputC0,
kInternalTranspose
>;
using DefaultGemmKernel = typename cutlass::gemm::kernel::DefaultGemm<
typename MapArguments::ElementA,
typename MapArguments::LayoutA,
MapArguments::kAlignmentA,
typename MapArguments::ElementB,
typename MapArguments::LayoutB,
MapArguments::kAlignmentB,
ElementOutputC0,
typename MapArguments::LayoutC,
ElementCompute,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueFunctorOp,
SwizzleThreadBlock,
kStages0,
true,
typename cutlass::gemm::device::DefaultGemmConfiguration<
OperatorClass, ArchTag, ElementInputA0, ElementInputB0, ElementOutputC0, ElementCompute>::Operator,
cutlass::gemm::SharedMemoryClearOption::kNone
>::GemmKernel;
///////////////////////////////////////////////////////////////////////////////////////////////
// Epilogue visitor
using EpilogueVisitor = kernel::EpilogueVisitorLayerNorm<
ThreadblockShape,
DefaultGemmKernel::kThreadCount,
typename DefaultGemmKernel::Epilogue::OutputTileIterator,
typename DefaultGemmKernel::Epilogue::AccumulatorFragmentIterator::AccumulatorTile,
ElementCompute,
ElementVariance,
ElementMean,
ElementLayernormCompute,
EpilogueFunctorOp,
kIsShiftedVariance
>;
/// Epilogue
using Epilogue = typename cutlass::epilogue::threadblock::EpilogueWithVisitorFromExistingEpilogue<
EpilogueVisitor,
typename DefaultGemmKernel::Epilogue
>::Epilogue;
// GEMM
using GemmEpilogueFusion = gemm::kernel::GemmWithEpilogueVisitor<
typename DefaultGemmKernel::Mma,
Epilogue,
SwizzleThreadBlock
>;
using ApplyFinalReductionKernel = kernel::ApplyFinalReduction<
ElementVariance,
ElementMean,
ElementLayernormCompute,
ElementOutputC0,
ThreadblockShape,
kIsShiftedVariance
>;
using GemmMainloopFusion = typename cutlass::gemm::device::GemmLayernormMainloopFusion<
ElementInputA1, LayoutInputA1,
ElementInputB1, LayoutInputB1,
ElementInputScaleBias, LayoutInputScaleBias,
ElementOutputC1, LayoutOutputC1,
ElementCompute,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueFunctorOp,
SwizzleThreadBlock,
kStages1
>;
public:
/// Arguments class
struct Arguments {
typename GemmEpilogueFusion::Arguments gemm0;
typename GemmMainloopFusion::Arguments gemm1;
typename ApplyFinalReductionKernel::Arguments reduction;
cutlass::gemm::GemmCoord extend;
//
// Methods
//
Arguments() { }
Arguments(
cutlass::gemm::GemmCoord problem_size0,
cutlass::gemm::GemmCoord problem_size1,
ElementInputA0 * ptr_A,
ElementInputB0 * ptr_B,
ElementOutputC0 * ptr_C,
ElementOutputC0 * ptr_D,
ElementOutputC0 * ptr_E,
ElementOutputC0 * ptr_O,
int64_t ldm_A,
int64_t ldm_B,
int64_t ldm_C,
int64_t ldm_D,
int64_t ldm_E,
int64_t ldm_O,
typename EpilogueFunctorOp::Params linear_scaling,
TensorVariance ref_Variance_,
TensorMean ref_Mean_,
TensorVariance ref_Gamma_,
TensorMean ref_Beta_,
ElementOutputC0 *ptr_Shifted_K = nullptr
):
gemm0(
cutlass::gemm::GemmUniversalMode::kGemm,
{kInternalTranspose ? problem_size0.n() : problem_size0.m(),\
kInternalTranspose ? problem_size0.m() : problem_size0.n(),\
problem_size0.k()},
{kInternalTranspose ? ptr_B : ptr_A, \
kInternalTranspose ? ldm_B : ldm_A},
{kInternalTranspose ? ptr_A : ptr_B, \
kInternalTranspose ? ldm_A : ldm_B},
typename EpilogueVisitor::Arguments(
linear_scaling,
{ptr_C, ldm_C},
{ptr_D, ldm_D},
ref_Variance_.data(),
ref_Mean_.data(),
ptr_Shifted_K
)
),
reduction(
MatrixCoord(kInternalTranspose ? problem_size0.n() : problem_size0.m(),\
kInternalTranspose ? problem_size0.m() : problem_size0.n()),
ref_Variance_,
ref_Mean_,
ptr_Shifted_K
),
gemm1(
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size1,
1,
linear_scaling,
kInternalTranspose ? ptr_E : ptr_D,
kInternalTranspose ? ptr_D : ptr_E,
ref_Variance_.data(),
ref_Mean_.data(),
ref_Gamma_.data(),
ref_Beta_.data(),
ptr_O,
ptr_O,
problem_size1.m() * problem_size1.k(),
problem_size1.n() * problem_size1.k(),
problem_size1.n(),
problem_size1.n(),
problem_size1.k(),
problem_size1.k(),
problem_size1.m() * problem_size1.n(),
problem_size1.m() * problem_size1.n(),
kInternalTranspose ? ldm_E : ldm_D,
kInternalTranspose ? ldm_D : ldm_D,
ref_Variance_.layout().stride(0),
ref_Mean_.layout().stride(0),
ref_Gamma_.layout().stride(0),
ref_Beta_.layout().stride(0),
ldm_O,
ldm_O
),
extend(problem_size0)
{
}
};
struct Params {
typename GemmEpilogueFusion::Params gemm0;
typename ApplyFinalReductionKernel::Params reduction;
MatrixCoord extend;
//
// Methods
//
Params() { }
Params(Arguments const &args):
gemm0(args.gemm0),
reduction(args.reduction),
extend(MatrixCoord(args.extend.m(), args.extend.n()))
{
}
};
public:
// Gemm
//
// Methods
//
private:
Params params_;
GemmMainloopFusion gemm_fusion_op;
public:
/// Ctor
GemmLayernorm() {
}
/// Initialize
Status initialize(Arguments const &args) {
params_ = Params(args);
cutlass::Status status;
size_t workspace_size = gemm_fusion_op.get_workspace_size(args.gemm1);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
status = gemm_fusion_op.can_implement(args.gemm1);
CUTLASS_CHECK(status);
status = gemm_fusion_op.initialize(args.gemm1, workspace.get());
CUTLASS_CHECK(status);
return cutlass::Status::kSuccess;
}
/// Run
Status run(cudaStream_t stream) {
//
// Launch the GEMM + layernorm kernel
//
dim3 gemm_grid = SwizzleThreadBlock().get_grid_shape(params_.gemm0.grid_tiled_shape);
dim3 gemm_block(GemmEpilogueFusion::kThreadCount, 1, 1);
int gemm_smem_size = int(sizeof(typename GemmEpilogueFusion::SharedStorage));
cutlass::Kernel<GemmEpilogueFusion><<<gemm_grid, gemm_block, gemm_smem_size, stream>>>(params_.gemm0);
cudaError_t result = cudaGetLastError();
if (result != cudaSuccess) {
return cutlass::Status::kErrorInternal;
}
//
// Launch the ApplyFinalReductionKernel
//
// The reduction is always performed along the leading dimension
int leading_dim_0 = kInternalTranspose ? params_.extend.row() : params_.extend.column();
int leading_dim_1 = kInternalTranspose ? params_.extend.column() : params_.extend.row();
int thread_per_block = 128;
int block_per_row = (leading_dim_1 + thread_per_block - 1) / thread_per_block;
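// If 128-thread blocks would yield fewer than 4 blocks, fall back to 32-thread blocks so that
// more blocks (and therefore more SMs) participate in the reduction for short extents.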
if (block_per_row < 4) {
thread_per_block = 32;
block_per_row = (leading_dim_1 + thread_per_block - 1) / thread_per_block;
}
dim3 final_reduction_block(thread_per_block);
dim3 final_reduction_grid(block_per_row);
Kernel<ApplyFinalReductionKernel><<<
final_reduction_grid, final_reduction_block, sizeof(typename ApplyFinalReductionKernel::SharedStorage), stream
>>>(params_.reduction);
result = cudaGetLastError();
if (result != cudaSuccess) {
return cutlass::Status::kErrorInternal;
}
//
// Launch the GEMM + mainloop fusion kernel
//
cutlass::Status status = gemm_fusion_op();
CUTLASS_CHECK(status);
return cutlass::Status::kSuccess;
}
/// Function call operator
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/37_gemm_layernorm_gemm_fusion/gemm_with_layernorm.h/0 | {
"file_path": "examples/37_gemm_layernorm_gemm_fusion/gemm_with_layernorm.h",
"repo_id": "examples",
"token_count": 12997
} | 5 |
/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run group convolution kernels using the functions and data structures
provided by CUTLASS, using Tensor Cores on an NVIDIA Ampere GPU.
There are 2 group conv modes:
1. cutlass::conv::GroupMode::kSingleGroup
This mode is for large K problem sizes: k_per_group (K/groups) is equal to or larger than
threadblock_tile_N. One or multiple threadblocks calculate the data of one group.
2. cutlass::conv::GroupMode::kMultipleGroup
This mode is for small K problem sizes: k_per_group (K/groups) is smaller than threadblock_tile_N.
One threadblock calculates data from more than one group.
The function profile_convolution_selecter() shows how to choose a kernel with the appropriate
group mode according to the problem size and threadblock tile size.
*/
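// Mode-selection example (with the 64x64x64 threadblock tile defined below, ThreadblockShape::kN = 64):
//   K=128, groups=2 -> k_per_group = 64 >= 64 -> kSingleGroup
//   K=128, groups=8 -> k_per_group = 16 <  64 -> kMultipleGroup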
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_group_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 64>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 3;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
// Analytic kernel and operation for single group problem size
using AnalyticSingleGroupKernel = typename cutlass::conv::kernel::DefaultConv2dGroupFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::GroupMode::kSingleGroup,
cutlass::conv::IteratorAlgorithm::kAnalytic
>::Kernel;
using AnalyticSingleGroupOperation = cutlass::conv::device::ImplicitGemmConvolution<AnalyticSingleGroupKernel>;
// Analytic kernel and operation for multiple group problem size
using AnalyticMultipleGroupKernel = typename cutlass::conv::kernel::DefaultConv2dGroupFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::GroupMode::kMultipleGroup,
cutlass::conv::IteratorAlgorithm::kAnalytic
>::Kernel;
using AnalyticMultipleGroupOperation = cutlass::conv::device::ImplicitGemmConvolution<AnalyticMultipleGroupKernel>;
// Optimized kernel and operation for single group problem size
using OptimizedSingleGroupKernel = typename cutlass::conv::kernel::DefaultConv2dGroupFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
cutlass::conv::GroupMode::kSingleGroup,
cutlass::conv::IteratorAlgorithm::kOptimized
>::Kernel;
using OptimizedSingleGroupOperation = cutlass::conv::device::ImplicitGemmConvolution<OptimizedSingleGroupKernel>;
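// Note: no optimized kernel is instantiated for the MultipleGroup mode; profile_convolution_selecter()
// below reports an error when --optimized is combined with a multiple-group problem size.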
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
int groups;
bool reference_check;
bool measure_performance;
int iterations;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool optimized;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
groups(1),
reference_check(false),
measure_performance(false),
iterations(20),
alpha(1),
beta(0),
optimized(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("optimized")) {
optimized = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
cmd.get_cmd_line_argument("g", groups);
filter_size.c() = input_size.c() / groups;
cmd.get_cmd_line_argument("u", conv_stride.row());
cmd.get_cmd_line_argument("v", conv_stride.column());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "42_ampere_tensorop_group_conv example\n\n"
<< " This example uses Ampere's Tensor Core operators on F16 data types to compute\n"
<< " forward grouped convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --g=<int> Conv groups G\n\n"
<< " --u=<int> Conv stride_h\n\n"
<< " --v=<int> Conv stride_w\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --optimized If set (true), use optimized kernel, otherwise use analytic kernel.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/42_ampere_tensorop_group_conv/42_ampere_tensorop_group_conv --n=4 --h=16 --w=16 --c=256 --k=128 --r=3 --s=3 --g=8 --ref-check\n\n"
<< "$ ./examples/42_ampere_tensorop_group_conv/42_ampere_tensorop_group_conv --n=4 --h=16 --w=16 --c=256 --k=128 --r=3 --s=3 --g=2 --ref-check\n\n"
<< "$ ./examples/42_ampere_tensorop_group_conv/42_ampere_tensorop_group_conv --n=4 --h=16 --w=16 --c=256 --k=128 --r=3 --s=3 --g=2 --ref-check --optimized\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
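// e.g. H=16, R=3, pad=1, stride=1: P = (16 + 1 + 1 - 3) / 1 + 1 = 16 (same-size output).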
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
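// For grouped conv, filter_size.c() already equals C/groups (set in parse()), so this counts
// NPQK * R * S * (C/G) multiply-adds.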
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,G,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< options.groups << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
template <typename Conv2dOperation>
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.output_size());
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill tensor C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(7),
ElementOutput(-8),
0);
// Fill tensor D on host with zeros
cutlass::reference::host::TensorFill(
tensor_d.host_view());
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices,
options.groups
);
// Construct Conv2dOperation::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename Conv2dOperation::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
Conv2dOperation implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on device...\n";
// Compute with reference implementation
cutlass::reference::device::Conv2dFprop<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c.device_ref(),
tensor_ref_d.device_ref(),
options.alpha,
options.beta
);
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_d.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
} else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
} else {
result.reference_check = cutlass::Status::kInvalid;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Result profile_convolution_selecter(Options const &options) {
int k_per_group = options.filter_size.n() / options.groups;
// In group conv, if k_per_group < threadblock_N, one Threadblock will calculate multiple groups
if (k_per_group < ThreadblockShape::kN) { // MultipleGroup mode
if (options.optimized) {
std::cerr << "Invalid problem: optimized group conv kernel doesn't support MultipleGroup (one CTA calculate multiple groups) mode" << std::endl;
exit(-1);
} else {
std::cout << "Select AnalyticMultipleGroupOperation\n";
return profile_convolution<AnalyticMultipleGroupOperation>(options);
}
} else { // SingleGroup mode
if (options.optimized) {
std::cout << "Select OptimizedSingleGroupOperation\n";
return profile_convolution<OptimizedSingleGroupOperation>(options);
} else {
std::cout << "Select AnalyticSingleGroupOperation\n";
return profile_convolution<AnalyticSingleGroupOperation>(options);
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major > 8 || (props.major == 8 && props.minor >= 0))) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution_selecter(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| examples/42_ampere_tensorop_group_conv/ampere_tensorop_group_conv.cu/0 | {
"file_path": "examples/42_ampere_tensorop_group_conv/ampere_tensorop_group_conv.cu",
"repo_id": "examples",
"token_count": 8701
} | 6 |
#################################################################################################
#
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import gen_ir
import helper
import gen_threadblock as gen_tb
class gen_default_Gemm:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "B2bGemm"
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
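# Note: helper.var_idx(name, i) is assumed here to simply append the index i to name
# (e.g. helper.var_idx("ThreadblockShape", 1) -> "ThreadblockShape1"), which is how the
# generated C++ below refers to the types of the i-th GEMM in the fused chain.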
def gen_B2bMma(self, specialized_template_args):
code = "using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<\n"
code += specialized_template_args
code += ">::ThreadblockB2bMma;\n"
# print(code)
return code
def gen_epilogue(self):
epilogue_code = ""
epilogue_code += helper.var_idx("static const int kPartitionsK", self.b2b_num - 1) + helper.var_idx(" = ThreadblockShape", self.b2b_num - 1) + helper.var_idx("::kK / WarpShape", self.b2b_num - 1) + "::kK;\n"
epilogue_code += "using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<\n"
epilogue_code += " " + helper.var_idx("ThreadblockShape", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("typename B2bMma::Operator", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("kPartitionsK", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("EpilogueOutputOp", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("EpilogueOutputOp", self.b2b_num - 1) + "::kCount\n"
epilogue_code += ">::Epilogue;\n"
epilogue_code += "using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle, SplitKSerial>;\n\n"
return epilogue_code
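# For example, with b2b_num == 2 (and assuming helper.var_idx appends the index as noted above),
# gen_epilogue() emits roughly:
#   static const int kPartitionsK1 = ThreadblockShape1::kK / WarpShape1::kK;
#   using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<
#     ThreadblockShape1, typename B2bMma::Operator1, kPartitionsK1,
#     EpilogueOutputOp1, EpilogueOutputOp1::kCount>::Epilogue;
#   using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle, SplitKSerial>;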
def gen_include_header(self):
code = '''
/* Auto Generated code - Do not edit.*/
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/layout/matrix.h\"
#include \"{cutlass_dir}cutlass/numeric_types.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/epilogue.h\"
#include \"{cutlass_dir}cutlass/epilogue/thread/linear_combination.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/gemm/kernel/gemm_pipelined.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm75.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm70.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm80.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_simt.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/threadblock_swizzle.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_tensor_op.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_simt.h\"
#include \"{cutlass_dir}cutlass/transform/threadblock/predicated_tile_iterator.h\"
#include \"../kernel/b2b_gemm.h\"
#include \"../threadblock/default_b2b_mma.h\"
'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_code(self):
gen_using = ''
# Generate default template struct
gen_code = gen_ir.gen_template_struct("Default" + self.gen_class_name, self.template_param,"", speicalized = None, set_default=False)
filter_list = []
filter_list.append(('Stages', 2))
filter_list.append(("OperatorClass", "arch::OpClassTensorOp"))
filter_list.append(("ArchTag", "arch::Sm75"))
for i in range(self.b2b_num):
filter_list.append((helper.var_idx("LayoutC", i), "layout::RowMajor"))
rtn_template_args, speicalized_template_args = gen_ir.filtered_param(self.template_param, filter_list, keep_= True)
B2bMma_code = self.gen_B2bMma(speicalized_template_args)
epilogue_and_rest_code = self.gen_epilogue()
gen_special_code = gen_ir.gen_template_struct("Default" + self.gen_class_name, rtn_template_args, B2bMma_code + epilogue_and_rest_code, speicalized = speicalized_template_args, set_default=False)
code = gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("kernel", gen_code + gen_special_code)))
return self.gen_include_header() + code
class gen_Kernel:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "B2bGemm"
self.template_param = template_param
self.b2bnum = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/matrix_coord.h\"\n'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_Params(self):
gen_param = ""
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + ";\n"
gen_param += " " + "cutlass::gemm::GemmCoord grid_tiled_shape;\n"
gen_param += " " + "typename B2bMma::IteratorA0::Params params_A0;\n"
gen_param += " " + "typename B2bMma::IteratorA0::TensorRef ref_A0;\n"
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::Params params_B", i) + ";\n"
gen_param += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::TensorRef ref_B", i) + ";\n"
if i == self.b2bnum - 1:
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::Params params_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_C", i) + ";\n"
else:
gen_param += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::Params params_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::TensorRef ref_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::Params params_D", self.b2bnum - 1) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_D", self.b2bnum - 1) + ";\n"
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("typename OutputOp", i) + helper.var_idx("::Params output_op_", i) + ";\n"
gen_param += " " + 'int batch_count' + ";\n"
gen_param += " " + 'int gemm_k_iterations_0' + ";\n"
return gen_param
def gen_Memberfunc(self):
code_default = "\nCUTLASS_HOST_DEVICE\n"
code_default += "Params()"
code_default += " { } \n\n"
code_construct = "\nCUTLASS_HOST_DEVICE\n"
code_construct += "Params(\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("cutlass::gemm::GemmCoord const & problem_size_", i) + ",\n"
code_construct += " " + "cutlass::gemm::GemmCoord const & grid_tiled_shape,\n"
code_construct += " " + "typename B2bMma::IteratorA0::TensorRef ref_A0,\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::TensorRef ref_B", i) + ",\n"
if i == self.b2bnum - 1:
code_construct += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_C", i) + ",\n"
else:
code_construct += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::TensorRef ref_C", i) + ",\n"
code_construct += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_D", self.b2bnum - 1) + ",\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("typename OutputOp", i) + helper.var_idx("::Params output_op_", i) + helper.var_idx(" = typename OutputOp", i) + "::Params(),\n"
code_construct += " " + "int batch_count = 1\n"
code_construct += "):\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("problem_size_", i) + helper.var_idx("(problem_size_", i) + "),\n"
code_construct += " " + "grid_tiled_shape(grid_tiled_shape),\n"
code_construct += " " + "params_A0(ref_A0.layout()),\n"
code_construct += " " + "ref_A0(ref_A0),\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("params_B", i) + helper.var_idx("(ref_B", i) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_B", i) + helper.var_idx("(ref_B", i) + "),\n"
code_construct += " " + helper.var_idx("params_C", i) + helper.var_idx("(ref_C", i) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_C", i) + helper.var_idx("(ref_C", i) + "),\n"
code_construct += " " + helper.var_idx("params_D", self.b2bnum - 1) + helper.var_idx("(ref_D", self.b2bnum - 1) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_D", self.b2bnum - 1) + helper.var_idx("(ref_D", self.b2bnum - 1) + "),\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("output_op_", i) + helper.var_idx("(output_op_", i) + "), \n"
code_construct += " " + "batch_count(batch_count) {\n"
code_construct += " " + helper.var_idx("gemm_k_iterations_", 0) + helper.var_idx(" = (problem_size_", 0) + helper.var_idx(".k() + B2bMma::Shape", 0) + helper.var_idx("::kK - 1) / B2bMma::Shape", 0) + "::kK;\n"
code_construct += "}\n"
return code_default + code_construct
def gen_using(self):
code_using = ""
for i in range(self.b2bnum - 1):
code_using += " " + helper.var_idx("using OutputOp", i) + helper.var_idx(" = typename B2bMma::OutputOp", i) + ";\n"
code_using += " " + helper.var_idx("using OutputOp", self.b2bnum - 1) + " = typename Epilogue::OutputOp;\n"
for i in range(self.b2bnum - 1):
code_using += " " + helper.var_idx("using FusedAddBiasEpilogue", i) + helper.var_idx(" = typename B2bMma::FusedAddBiasEpilogue", i) +";\n"
code_using += " " + "using WarpCount0 = typename B2bMma::WarpCount0;\n"
code_using += " " + "static int const kThreadCount = 32 * WarpCount0::kCount;\n"
code_using += gen_ir.gen_struct("Params", self.gen_Params() + self.gen_Memberfunc())
code_using += "union SharedStorage {\n"
code_using += " " + "typename B2bMma::B2bMmaSharedStorage main_loop;\n"
code_using += " " + "typename Epilogue::SharedStorage epilogue;\n"
code_using += "};\n"
return code_using
def gen_can_implement(self):
gen_code = ""
return gen_code
def gen_operator_and_constr(self):
ctr_code = "CUTLASS_HOST_DEVICE\n"
ctr_code += self.gen_class_name + "() { } \n\n"
operator_code = "CUTLASS_DEVICE\n"
operator_code += "void operator()(Params const ¶ms, SharedStorage &shared_storage) {\n"
operator_code += " " + "ThreadblockSwizzle threadblock_swizzle;\n"
operator_code += " " + "cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.grid_tiled_shape);\n"
operator_code += " " + "int batch_idx = threadblock_tile_offset.k();\n"
operator_code += " " + "if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||\n"
operator_code += " " + "params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {\n"
operator_code += " " + " " + "return;\n"
operator_code += " " + "}\n"
operator_code += " " + "cutlass::MatrixCoord tb_offset_A0{\n"
operator_code += " " + " " + "threadblock_tile_offset.m() * B2bMma::Shape0::kM,\n"
operator_code += " " + " " + "0\n"
operator_code += " " + "};\n"
for i in range(self.b2bnum):
operator_code += " " + helper.var_idx("cutlass::MatrixCoord tb_offset_B", i) + "{\n"
operator_code += " " + " " + "0,\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.n() * B2bMma::Shape", i) + "::kN\n"
operator_code += " " + "};\n"
operator_code += " " + "int thread_idx = threadIdx.x;\n\n"
operator_code += " " + "MatrixCoord threadblock_offset(\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.m() * B2bMma::Shape", self.b2bnum - 1) + "::kM,\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.n() * B2bMma::Shape", self.b2bnum - 1) + "::kN\n"
operator_code += " " + ");\n"
operator_code += " " + "typename B2bMma::IteratorA0 iterator_A0(\n"
operator_code += " " + " " + "params.params_A0,\n"
operator_code += " " + " " + "params.ref_A0.data(),\n"
operator_code += " " + " " + "params.problem_size_0.mk(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "tb_offset_A0);\n"
operator_code += " " + "iterator_A0.add_pointer_offset(batch_idx * params.problem_size_0.m() * params.problem_size_0.k());\n\n"
for i in range (self.b2bnum):
operator_code += " " + helper.var_idx("typename B2bMma::IteratorB", i ) + helper.var_idx(" iterator_B", i) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_B", i) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_B", i) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", i) + ".kn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + helper.var_idx("tb_offset_B", i) + ");\n"
operator_code += " " + helper.var_idx("iterator_B", i) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", i) + helper.var_idx(".n() * params.problem_size_", i) + ".k());\n\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("typename FusedAddBiasEpilogue", i ) + helper.var_idx("::OutputTileIterator iterator_C", i) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_C", i) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_C", i) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_" , i) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset" + ");\n"
operator_code += " " + helper.var_idx("int ref_C", i) + helper.var_idx("_stride = params.ref_C", i) + ".stride()[0];\n"
operator_code += " " + helper.var_idx("iterator_C", i) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", i) + helper.var_idx(".n() * (ref_C", i) + helper.var_idx("_stride == 0 ? 1 : params.problem_size_", i) + ".m()));\n\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("FusedAddBiasEpilogue", i ) + helper.var_idx(" epilogue_", i ) + ";\n"
operator_code += " " + "int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);\n"
operator_code += " " + "int lane_idx = threadIdx.x % 32;\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("OutputOp", i) + helper.var_idx(" output_op_", i) + helper.var_idx("(params.output_op_", i) + ");\n"
operator_code += " " + "B2bMma b2bMma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);\n"
operator_code += " " + "typename B2bMma::FragmentC0 src_accum;\n"
operator_code += " " + helper.var_idx("typename B2bMma::FragmentC", self.b2bnum - 1)+ " accumulators;\n"
operator_code += " " + "src_accum.clear();\n"
operator_code += " " + "accumulators.clear();\n"
operator_code += " " + "b2bMma(params.gemm_k_iterations_0, accumulators, iterator_A0, "
for i in range(self.b2bnum):
operator_code += helper.var_idx("iterator_B", i) + ", "
operator_code += "src_accum"
if self.b2bnum != 1:
operator_code += ", "
for i in range(self.b2bnum - 1):
operator_code += helper.var_idx("output_op_", i) + ", "
for i in range(self.b2bnum - 1):
operator_code += helper.var_idx("epilogue_", i) + ", "
for i in range(self.b2bnum - 1):
final = ", "
if i == self.b2bnum - 2:
final =""
operator_code += helper.var_idx("iterator_C", i) + final
operator_code += ");\n"
operator_code += " " + helper.var_idx("OutputOp", self.b2bnum - 1) + helper.var_idx(" output_op_", self.b2bnum - 1) + helper.var_idx("(params.output_op_", self.b2bnum - 1) + ");\n"
operator_code += " " + "threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.grid_tiled_shape);\n"
operator_code += " " + helper.var_idx("typename Epilogue::OutputTileIterator iterator_C", self.b2bnum - 1) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_C", self.b2bnum - 1) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_C", self.b2bnum - 1) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", self.b2bnum - 1) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset\n"
operator_code += " " + ");\n"
operator_code += " " + helper.var_idx("int ref_C", self.b2bnum - 1) + helper.var_idx("_stride = params.ref_C", self.b2bnum - 1) + ".stride()[0];\n"
operator_code += " " + helper.var_idx("iterator_C", self.b2bnum - 1) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", self.b2bnum - 1) + helper.var_idx(".n() * (ref_C", self.b2bnum - 1) + helper.var_idx("_stride == 0 ? 1 : params.problem_size_", self.b2bnum - 1) + ".m()));\n\n"
operator_code += " " + helper.var_idx("typename Epilogue::OutputTileIterator iterator_D", self.b2bnum - 1) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_D", self.b2bnum - 1) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_D", self.b2bnum - 1) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", self.b2bnum - 1) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset\n"
operator_code += " " + ");\n"
operator_code += " " + helper.var_idx("iterator_D", self.b2bnum - 1) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", self.b2bnum - 1) + helper.var_idx(".n() * params.problem_size_", self.b2bnum - 1) + ".m());\n\n"
operator_code += " " + "Epilogue epilogue(\n"
operator_code += " " + " " + "shared_storage.epilogue,\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "warp_idx,\n"
operator_code += " " + " " + "lane_idx\n"
operator_code += " " + ");\n"
operator_code += " " + "epilogue("
operator_code += helper.var_idx("output_op_", self.b2bnum - 1) + ", "
operator_code += helper.var_idx("iterator_D", self.b2bnum - 1) + ", "
operator_code += "accumulators, "
operator_code += helper.var_idx("iterator_C", self.b2bnum - 1) + ");\n"
operator_code += "}\n"
return ctr_code + operator_code
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/matrix_coord.h\"
#include \"{cutlass_dir}cutlass/semaphore.h\"
'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_code(self):
template_param = []
template_param.append(("typename", "B2bMma"))
template_param.append(("typename", "Epilogue"))
template_param.append(("typename", "ThreadblockSwizzle"))
template_param.append((bool, "SplitKSerial"))
code_body = ""
code_body += self.gen_using()
code_body += self.gen_operator_and_constr()
struct_code = gen_ir.gen_template_struct(self.gen_class_name, template_param, code_body)
code = self.gen_include_header()
code += gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("kernel", struct_code)))
return self.gen_include_header() + code
class gen_kernel:
def __init__(self, template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root):
self.template_param = template_param
self.gen_class_name = "B2bGemm"
self.gen_kernel_name = gen_class_name + "Kernel"
self.template_args = []
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
self.gen_default_b2b_gemm = gen_default_Gemm(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
self.gen_Kerenl = gen_Kernel(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
# Include gen_threadBlock
self.gen_threadBlock = gen_tb.gen_threadblock(template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root)
self.file_dir = output_dir + "/kernel/"
def gen_code(self, first_use_1stage):
default_b2b_gemm = self.gen_default_b2b_gemm.gen_code()
print("[INFO]: Gen kernel code [default_b2b_gemm.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "default_b2b_gemm.h", "w+") as f:
f.write(default_b2b_gemm)
kernel = self.gen_Kerenl.gen_code()
print("[INFO]: Gen kernel code [b2b_gemm.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "b2b_gemm.h", "w+") as f:
f.write(kernel)
# Call code to gen threadblock
self.gen_threadBlock.gen_code(first_use_1stage)
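# A minimal usage sketch (hypothetical values; the real template_param list and the paths are
# supplied by the surrounding generator driver, which is not shown here):
#
#   kernel_gen = gen_kernel(template_param, "B2bGemm", b2b_num=2,
#                           output_dir="./generated", cutlass_deps_root="../../../include/",
#                           project_root="./")
#   kernel_gen.gen_code(first_use_1stage=False)
#
# This writes default_b2b_gemm.h and b2b_gemm.h under <output_dir>/kernel/ and then emits the
# threadblock-level code through gen_threadblock.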
| examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_kernel.py/0 | {
"file_path": "examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_kernel.py",
"repo_id": "examples",
"token_count": 11319
} | 7 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Example of a GETT targeting Hopper tensor cores using the CUTLASS 3.x API.
CUTLASS has long provided implementations of Generalized Matrix times Matrix (GEMM) kernels.
However, a plethora of workloads compute on higher ranked tensors. Products of such tensors,
called tensor contractions, can be executed as multiple batched GEMMs, however, they can be
further accelerated with kernels that natively operate on these higher ranked tensors to
perform Generalized Tensor times Tensor contractions (GETT). CuTe's hierarchical layouts
and CUTLASS 3.0's unified micro-kernels make implementation of GETTs trivial. In this example,
we show how CUTLASS 3.0, CuTe, and Hopper's TMA feature together can accelerate GETTs while
making the process of authoring custom GETT kernels easier than ever before.
The modes of a tensor that participate in a GETT can be fundamentally grouped into four
semantic categories. The contraction modes (or K-modes) only appear in the A and B (left and right)
inputs but not in the C output tensor. Row modes (or M-modes) only appear in the left
input tensor (A) and the output tensor (C). Column modes (or N-modes) only appear in the
right (B) input tensor and the output tensor (C). Batch modes (or L-modes) appear in all
input and output tensors. If we fold the many modes of a tensor contraction into these four
categories, it would allow us to represent the input and output tensors as rank-3 "matrices"
that can be computed upon as if we were computing a batched GEMM!
This is exactly what CuTe's hierarchical layout representation allows us to do! Instead of having
simple integers as strides for these four modes, we can have nested strides for each of these
semantic categories that themselves have multiple modes within them -- multi-mode strides!
In CUTLASS 3.0, all one has to do to take advantage of this capability is to substitute the
required multi-mode strides instead of the default ones provided by gemm::detail::TagToStrideX.
In the following example, we illustrate how every Hopper GEMM in CUTLASS 3.0 is a GETT in disguise.
We begin by defining the four modes detailed above as Row, Col (column), Red (reduction), and
Bat (batch) strides, which we then nest for each of the in/out tensors to create our rank-3 stride
tuples. Note that although we do not define the problem shape type explicitly, it too remains a
rank-4 shape tuple just like any other batched GEMM, but instead with multi-mode shapes for each
of the four corresponding multi-modes within it. After this, the same CollectiveMma and
CollectiveBuilder we describe in examples 50 and 49 are used to create our kernel type. Nothing
else changes from a user's point of view. Note that multi-mode strides do not affect our
specializations in any way -- the lexical spelling of our kernels remains the same. The
only difference between a CUTLASS 3 batched GEMM and a GETT lies in the instantiated CuTe Layouts.
CollectiveBuilders rely on detecting the static-1 in the stride tuples to determine the major mode,
which is what the example demonstrates. However, it is possible to have all modes be dynamic as well
if the user assembles a CollectiveMma manually and ensures that the runtime strides are compatible
with the static micro-kernel of the collective (TiledMma, TiledCopy, and smem layouts). On the other
hand, a user can have more than one static stride too (which need not correspond to the major mode).
In particular, this example demonstrates a GETT where the 0th M-mode (M0) in A and the 0th K-mode (K0)
in B are major. All other combinations of major modes are supported, with the exception of mixed
K-major scenarios where both A and B are K-major (e.g. K0 is major in A but K1 is major in B).
NVIDIA Hopper architecture's TMA feature makes the predication required to implement these complicated
kernels trivial, as it is all handled by TMA itself without requiring any programmer effort.
Example executions, where the stride order defines the major-order (major on the left):
51_hopper_gett --modeC=m,n,l --modeA=m,k,l --modeB=k,n,l --extents=m:4096,n:4096,k:4096
51_hopper_gett --modeC=l,m,n --modeA=m,l,k --modeB=k,n,l --extents=m:128,n:128,k:128,l:64
51_hopper_gett --modeC=m,a,b,p,q,n,l --modeA=m,l,b,k,a --modeB=k,n,p,q,l --extents=m:32,a:32,b:3,n:128,k:128,l:4,p:3,q:3
*/
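// To make the mode folding concrete: in the third example command above, the modes group as
//   M-modes: (m, a, b)   N-modes: (n, p, q)   K-modes: (k)   L-modes: (l)
// so the kernel sees A as a rank-3 "matrix" of shape ((m,a,b), (k), (l)), B as ((n,p,q), (k), (l)),
// and C/D as ((m,a,b), (n,p,q), (l)), each with a correspondingly nested stride tuple. This is
// exactly the multi-mode shape/stride assembly performed in main() below.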
#include "gett_kernel.cuh"
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/util/gett_commandline.hpp"
#include "cutlass/util/reference/device/gett.hpp"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/print_error.hpp"
namespace example {
// Returns true if the left-most value in the tuple is statically known to be 1
template<class Stride>
constexpr bool
is_left_major() {
// Account for stride types with and without batch mode and batch modes with static zero stride
return cute::is_constant<1, decltype(cute::size<0,0>(Stride{}))>::value;
}
// Same as cute::make_int_tuple but inserts a major stride (Int<1>) for the leftmost mode if required
template <int Rank, bool IsMajor, class Indexable>
static constexpr
auto
make_stride_tuple(Indexable const& t, int n, int64_t init_default = 0) {
static_assert(Rank > 1);
if constexpr (IsMajor) {
return cute::transform(cute::make_seq<Rank>{}, [&](auto i) {
if constexpr (i == 0) {
return cute::Int<1>{};
}
else {
return i < n ? t[i] : init_default;
}
});
}
else {
return cute::make_int_tuple<Rank>(t, n, init_default);
}
}
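// For instance (illustrative values): with Rank = 4, IsMajor = true, t = {1, 128, 16384} and
// n = 3, make_stride_tuple produces (_1, 128, 16384, 0) -- the leftmost mode becomes the static
// major stride Int<1>, the remaining user strides are copied through, and unused trailing modes
// receive init_default.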
} // namespace example
//////////////////////////////////////////////////////////////////////////////
int
main(int argc, char const* argv[]) {
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
using namespace cute;
if (argc != 5) {
std::cout << "Number of command line args must be 4.\n";
cutlass::GettCommandLine::print_usage();
return 0;
}
//
// Define the stride types for A, B, C, and D
//
// Stride for A (left input). If reduction mode is major, same must be major in B
// For this example, M0 is major in A.
using RowModeStridesA = cute::Stride<cute::Int<1>, int64_t, int64_t, int64_t>;
using RedModeStridesA = cute::Stride<int64_t, int64_t, int64_t>;
using BatModeStridesA = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
// Stride for B (right input). If reduction mode is major, same must be major in A
// For this example, K0 is major in B.
using ColModeStridesB = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
using RedModeStridesB = cute::Stride<cute::Int<1>, int64_t, int64_t>;
using BatModeStridesB = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
// Strides for output, which can all be dynamic.
using RowModeStridesC = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
using ColModeStridesC = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
using BatModeStridesC = cute::Stride<int64_t, int64_t, int64_t, int64_t>;
// Assemble our rank-3 multi-mode strides for the in/out tensors
using StrideA = cute::Stride<RowModeStridesA, RedModeStridesA, BatModeStridesA>;
using StrideB = cute::Stride<ColModeStridesB, RedModeStridesB, BatModeStridesB>;
using StrideC = cute::Stride<RowModeStridesC, ColModeStridesC, BatModeStridesC>;
// Note: C and D share strides here for simplicity.
// In general, they need not have the same layout.
using StrideD = StrideC;
//
// Define element types for tensors and intermediate values
//
using ElementA = cutlass::half_t;
using ElementB = cutlass::half_t;
using ElementC = cutlass::half_t;
using ElementD = float;
using ElementAccumulator = float;
using ElementEpilogue = float;
// The following constexpr values set the max number of modes in each MNKL mode
constexpr int MaxRank_M = cute::rank(RowModeStridesA{}); // Max row modes
constexpr int MaxRank_N = cute::rank(ColModeStridesB{}); // Max column modes
constexpr int MaxRank_K = cute::rank(RedModeStridesA{}); // Max contraction modes
constexpr int MaxRank_L = cute::rank(BatModeStridesA{}); // Max batch modes
static_assert(cute::rank(RowModeStridesA{}) == cute::rank(RowModeStridesC{}));
static_assert(cute::rank(ColModeStridesB{}) == cute::rank(ColModeStridesC{}));
static_assert(cute::rank(RedModeStridesA{}) == cute::rank(RedModeStridesB{}));
static_assert(cute::rank(BatModeStridesA{}) == cute::rank(BatModeStridesC{}));
static_assert(cute::rank(BatModeStridesB{}) == cute::rank(BatModeStridesC{}));
// Parse command line to get modes, extents, and strides
cutlass::GettCommandLine cmd;
auto parsed_args = cmd.parse(argc, argv, true);
auto& m = parsed_args.M;
auto& ldAm = parsed_args.ldAm;
auto& ldCm = parsed_args.ldCm;
int rank_m = int(m.size());
auto& n = parsed_args.N;
auto& ldBn = parsed_args.ldBn;
auto& ldCn = parsed_args.ldCn;
int rank_n = int(n.size());
auto& k = parsed_args.K;
auto& ldAk = parsed_args.ldAk;
auto& ldBk = parsed_args.ldBk;
int rank_k = int(k.size());
auto& l = parsed_args.L;
auto& ldAl = parsed_args.ldAl;
auto& ldBl = parsed_args.ldBl;
auto& ldCl = parsed_args.ldCl;
int rank_l = int(l.size());
if ((rank_m > MaxRank_M) || (rank_n > MaxRank_N) || (rank_k > MaxRank_K) || (rank_l > MaxRank_L)) {
std::cerr << "ERROR: Input has more modes than statically configured.";
return 1;
}
// Check that the user-provided major strides match the static major strides.
if (example::is_left_major<RowModeStridesA>() && (ldAm[0] != 1)) {
std::cerr << "ERROR: A_M0 is expected to be major, but was not in the provided input!\n";
return 1;
}
if (example::is_left_major<RedModeStridesA>() && (ldAk[0] != 1)) {
std::cerr << "ERROR: A_K0 is expected to be major, but was not in the provided input!\n";
return 1;
}
if (example::is_left_major<ColModeStridesB>() && (ldBn[0] != 1)) {
std::cerr << "ERROR: B_N0 is expected to be major, but was not in the provided input!\n";
return 1;
}
if (example::is_left_major<RedModeStridesB>() && (ldBk[0] != 1)) {
std::cerr << "ERROR: B_K0 is expected to be major, but was not in the provided input!\n";
return 1;
}
// Convert to `cute::Tuple`s and set up arguments
auto M = make_int_tuple<MaxRank_M>(m.data(), rank_m, 1);
auto dAm = example::make_stride_tuple<MaxRank_M, example::is_left_major<RowModeStridesA>()>(ldAm.data(), rank_m);
auto dCm = example::make_stride_tuple<MaxRank_M, example::is_left_major<RowModeStridesC>()>(ldCm.data(), rank_m);
auto N = make_int_tuple<MaxRank_N>(n.data(), rank_n, 1);
auto dBn = example::make_stride_tuple<MaxRank_N, example::is_left_major<ColModeStridesB>()>(ldBn.data(), rank_n);
auto dCn = example::make_stride_tuple<MaxRank_N, example::is_left_major<ColModeStridesC>()>(ldCn.data(), rank_n);
auto K = make_int_tuple<MaxRank_K>(k.data(), rank_k, 1);
auto dAk = example::make_stride_tuple<MaxRank_K, example::is_left_major<RedModeStridesA>()>(ldAk.data(), rank_k);
auto dBk = example::make_stride_tuple<MaxRank_K, example::is_left_major<RedModeStridesB>()>(ldBk.data(), rank_k);
auto L = make_int_tuple<MaxRank_L>(l.data(), rank_l, 1);
auto dAl = make_int_tuple<MaxRank_L>(ldAl.data(), rank_l, 0);
auto dBl = make_int_tuple<MaxRank_L>(ldBl.data(), rank_l, 0);
auto dCl = make_int_tuple<MaxRank_L>(ldCl.data(), rank_l, 0);
// Concatenate the tuples into a rank-4 problem shape and rank-3 strides, just like GEMM
auto problem_shape = make_shape(M, N, K, L);
StrideA stride_A = make_stride(dAm, dAk, dAl);
StrideB stride_B = make_stride(dBn, dBk, dBl);
StrideC stride_C = make_stride(dCm, dCn, dCl);
StrideD stride_D = stride_C;
auto alpha = ElementEpilogue(1.0f);
auto beta = ElementEpilogue(1.0f);
//
// Allocate and init tensors
//
auto M_size = std::accumulate(std::begin(m), std::end(m), 1, std::multiplies<>{});
auto N_size = std::accumulate(std::begin(n), std::end(n), 1, std::multiplies<>{});
auto K_size = std::accumulate(std::begin(k), std::end(k), 1, std::multiplies<>{});
auto L_size = std::accumulate(std::begin(l), std::end(l), 1, std::multiplies<>{});
thrust::host_vector<ElementA> h_A(M_size * K_size * L_size);
thrust::host_vector<ElementB> h_B(N_size * K_size * L_size);
thrust::host_vector<ElementC> h_C(M_size * N_size * L_size);
thrust::host_vector<ElementD> h_D(M_size * N_size * L_size);
// Note: the cast to int here is to avoid false-negative ref-checks which can
// occur due to floating point arithmetic not being purely associative.
for (auto& a : h_A) a = ElementA(int(4*(rand() / double(RAND_MAX)) - 1));
for (auto& b : h_B) b = ElementB(int(4*(rand() / double(RAND_MAX)) - 1));
for (auto& c : h_C) c = ElementC(int(4*(rand() / double(RAND_MAX)) - 1));
for (auto& d : h_D) d = ElementD(-1);
thrust::device_vector<ElementA> d_A = h_A;
thrust::device_vector<ElementB> d_B = h_B;
thrust::device_vector<ElementC> d_C = h_C;
thrust::device_vector<ElementD> cutlass_result = h_D;
thrust::device_vector<ElementD> reference_result = h_D;
//
// Compute GETT
//
auto status = example::gett_kernel(
problem_shape,
d_A.data().get(), stride_A,
d_B.data().get(), stride_B,
ElementAccumulator{},
d_C.data().get(), stride_C,
cutlass_result.data().get(), stride_D,
alpha, beta);
if (cutlass::Status::kSuccess != status) {
std::cerr << "ERROR: GETT operator launch failed.\n";
return 1;
}
auto cuda_err = cudaDeviceSynchronize();
if (cudaSuccess != cuda_err) {
std::cerr << "ERROR: GETT operator execution failed with error: ";
std::cerr << cudaGetErrorString(cuda_err) << "\n";
return 1;
}
//
// Verify
//
cutlass::reference::device::gett(
problem_shape,
d_A.data().get(), stride_A,
d_B.data().get(), stride_B,
ElementAccumulator{},
d_C.data().get(), stride_C,
reference_result.data().get(), stride_D,
alpha, beta);
cuda_err = cudaDeviceSynchronize();
if (cudaSuccess != cuda_err) {
std::cerr << "ERROR: GETT reference execution failed with error: ";
std::cerr << cudaGetErrorString(cuda_err) << "\n";
return 1;
}
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::device::BlockCompareEqual(
reference_result.data().get(), cutlass_result.data().get(), cutlass_result.size());
if (passed) {
std::cout << "GETT verification passed.\n";
return 0;
}
else {
std::cerr << "ERROR: GETT verification failed! Printing detailed stats.\n";
h_D = reference_result;
thrust::host_vector<ElementD> h_cutlass_result = cutlass_result;
print_relative_error(h_cutlass_result.size(), h_cutlass_result.data(), h_D.data());
std::cout << "StrideA: "; print(stride_A); std::cout << '\n';
std::cout << "StrideB: "; print(stride_B); std::cout << '\n';
std::cout << "StrideC: "; print(stride_C); std::cout << '\n';
std::cout << "StrideD: "; print(stride_D); std::cout << '\n';
return 1;
}
#else
std::cerr << "Unsupported example. Please ensure CUTLASS_ARCH_MMA_SM90_SUPPORTED is defined.\n";
return 0;
#endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
}
| examples/51_hopper_gett/51_hopper_gett.cu/0 | {
"file_path": "examples/51_hopper_gett/51_hopper_gett.cu",
"repo_id": "examples",
"token_count": 6010
} | 8 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cute/layout.hpp"
#include "cute/tensor.hpp"
#include "cute/util/print.hpp"
namespace example {
using namespace cute;
// Empty type used to disable gather/scatter for a GEMM argument
struct NoGather
{
template<class... Ts>
NoGather(Ts...) {};
};
/// Function object that applies an index to its argument
template <class Index>
struct IndexedGather
{
CUTE_HOST_DEVICE constexpr
IndexedGather(Index const *indices = {}): indices_(indices) {}
template <typename I>
CUTE_HOST_DEVICE constexpr
Index
operator()(I i) const { return indices_[i]; }
CUTE_HOST_DEVICE friend
void
print(IndexedGather const &s) {
cute::print("Indexed");
}
Index const *indices_;
};
/// Function object that applies a stride to its argument
/// Example: StridedGather<_2> gathers every other row/column
template <class Stride>
struct StridedGather
{
CUTE_HOST_DEVICE constexpr
StridedGather(Stride stride = {}): stride_(stride) {}
template <class I>
CUTE_HOST_DEVICE constexpr
auto
operator()(I i) const { return i * stride_; }
CUTE_HOST_DEVICE friend
void
print(StridedGather const &s) {
cute::print("Strided{");
print(s.stride_);
cute::print("}");
}
Stride stride_;
};
/// Custom stride object that applies a function followed by a stride
template <class Func, class Stride>
struct CustomStride
{
CUTE_HOST_DEVICE constexpr
CustomStride(Func const &func, Stride const &stride): func_(func), stride_(stride) {}
template <class I>
CUTE_HOST_DEVICE constexpr friend
auto
operator*(I i, CustomStride const &s) { return s.func_(i) * s.stride_; }
template <class I>
CUTE_HOST_DEVICE constexpr friend
auto
operator*(CustomStride const &s, I i) { return s.func_(i) * s.stride_; }
CUTE_HOST_DEVICE friend
void
print(CustomStride const & s) {
cute::print("Custom{");
print(s.func_);
cute::print(",");
print(s.stride_);
cute::print("}");
}
template<class Div>
CUTE_HOST_DEVICE constexpr friend
auto
safe_div(CustomStride const &s, Div const &div)
{
return CustomStride<Func, decltype(safe_div(s.stride_, div))>(s.func_, safe_div(s.stride_, div));
}
// Circumvent the requirement on make_layout that shape and stride are integral
template <class Shape>
CUTE_HOST_DEVICE constexpr friend
auto
make_layout(Shape const &shape, CustomStride const &stride)
{
return Layout<Shape, CustomStride>(shape, stride);
}
Func func_;
Stride stride_;
};
template<class Stride, class Func>
CUTLASS_HOST_DEVICE
auto
make_custom_stride_layout(Stride const &stride, Func&& func)
{
// Use a dummy shape and replace the first non-unit stride with a custom gather stride
auto idx = find_if(stride, [](auto x){ return not is_constant<1, decltype(x)>{}; });
constexpr int I = decltype(idx)::value;
return make_layout(repeat_like(stride, _1{}),
replace<I>(stride, CustomStride{static_cast<Func&&>(func), get<I>(stride)}));
}
/// Helper function to optionally create a gather tensor
template<class Iterator, class Shape, class Stride, class Func>
CUTLASS_HOST_DEVICE
auto
make_gather_tensor(Iterator iter, Shape const &shape, Stride const &stride, Func &&func)
{
if constexpr (not cutlass::platform::is_same<remove_cvref_t<Func>, NoGather>::value) {
Layout matrix_layout = make_identity_layout(shape);
auto offset = as_arithmetic_tuple(repeat_like(shape, _0{}));
Layout gather_layout = make_custom_stride_layout(stride, static_cast<Func&&>(func));
return make_tensor(iter, ComposedLayout{gather_layout, offset, matrix_layout});
} else {
return make_tensor(iter, shape, stride);
}
}
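// A minimal usage sketch (illustrative only; `gather_idx` and the row-major M x K shape are
// assumptions, not part of this header):
//
//   // Tensor mA = make_gather_tensor(make_gmem_ptr(ptr_A),
//   //                                make_shape(M, K), make_stride(K, _1{}),
//   //                                IndexedGather<int>{gather_idx});
//
// mA then behaves like an ordinary CuTe tensor whose row index is translated through gather_idx.
// Passing NoGather{} instead makes this a plain make_tensor call, so the same kernel code can be
// instantiated with or without gather/scatter.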
} // namespace example
namespace cute
{
template<int N, int I, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
upcast(Shape const& shape, Stride const& stride)
{
if constexpr (is_tuple<Shape>::value) {
return transform_layout(shape, stride, [](auto const& s, auto const& d) { return upcast<N,I>(s,d); });
} else if constexpr (is_scaled_basis<Stride>::value) {
if constexpr (Stride::mode() == I) {
return make_layout(shape_div(shape, Int<N>{}), shape_div(stride, Int<N>{}));
} else {
return make_layout(shape, stride);
}
} else {
return upcast<N>(shape, stride);
}
CUTE_GCC_UNREACHABLE;
}
template <int N, class OuterShape, class OuterStride, class Offset, class Shape, class Stride>
CUTE_HOST_DEVICE constexpr
auto
upcast(ComposedLayout<Layout<OuterShape,OuterStride>,Offset,Layout<Shape,Stride>> const& layout)
{
// Find index of the stride-1 mode - that is the only one that requires updating inner shape and offset
auto idx = find_if(layout.layout_a().stride(), [](auto x){ return is_constant<1, decltype(x)>{}; });
constexpr int I = decltype(idx)::value;
// Upcast the outer layout (works as expected)
auto outer = upcast<N>(layout.layout_a());
// Upcast the accumulated offset along stride-1 mode
auto offset = as_arithmetic_tuple(replace<I>(layout.offset(), upcast<N>(get<I>(layout.offset()))));
// Upcast the inner layout's shape along stride-1 mode
auto inner = upcast<N,I>(layout.layout_b().shape(), layout.layout_b().stride());
return composition(outer, offset, inner);
}
} // namespace cute
| examples/common/gather_tensor.hpp/0 | {
"file_path": "examples/common/gather_tensor.hpp",
"repo_id": "examples",
"token_count": 2342
} | 9 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/config.hpp>
#include <cute/arch/copy.hpp>
// Config
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) && (__CUDACC_VER_MAJOR__ >= 12))
# define CUTE_ARCH_STSM_SM90_ENABLED
# define CUTE_ARCH_TMA_SM90_ENABLED
#endif
#if defined(CUTE_ARCH_TMA_SM90_ENABLED) && \
((__CUDACC_VER_MAJOR__ > 12) || ((__CUDACC_VER_MAJOR__ == 12) && (__CUDACC_VER_MINOR__ >= 3)))
# define CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED
#endif
namespace cute
{
struct SM90_U32x1_STSM_N
{
using SRegisters = uint32_t[1];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src,
uint128_t & smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x1.m8n8.shared.b16 [%0], {%1};\n"
:: "r"(smem_int_ptr),
"r"(src));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U32x2_STSM_N
{
using SRegisters = uint32_t[2];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src0, uint32_t const& src1,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x2.m8n8.shared.b16 [%0], {%1, %2};\n"
:: "r"(smem_int_ptr),
"r"(src0), "r"(src1));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U32x4_STSM_N
{
using SRegisters = uint32_t[4];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src0, uint32_t const& src1, uint32_t const& src2, uint32_t const& src3,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x4.m8n8.shared.b16 [%0], {%1, %2, %3, %4};\n"
:: "r"(smem_int_ptr),
"r"(src0), "r"(src1), "r"(src2), "r"(src3));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U16x2_STSM_T
{
using SRegisters = uint32_t[1];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x1.trans.m8n8.shared.b16 [%0], {%1};\n"
:: "r"(smem_int_ptr),
"r"(src));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U16x4_STSM_T
{
using SRegisters = uint32_t[2];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src0, uint32_t const& src1,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x2.trans.m8n8.shared.b16 [%0], {%1, %2};\n"
:: "r"(smem_int_ptr),
"r"(src0), "r"(src1));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
struct SM90_U16x8_STSM_T
{
using SRegisters = uint32_t[4];
using DRegisters = uint128_t[1];
CUTE_HOST_DEVICE static void
copy(uint32_t const& src0, uint32_t const& src1, uint32_t const& src2, uint32_t const& src3,
uint128_t& smem_dst)
{
#if defined(CUTE_ARCH_STSM_SM90_ENABLED)
uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst);
asm volatile ("stmatrix.sync.aligned.x4.trans.m8n8.shared.b16 [%0], {%1, %2, %3, %4};\n"
:: "r"(smem_int_ptr),
"r"(src0), "r"(src1), "r"(src2), "r"(src3));
#else
CUTE_INVALID_CONTROL_PATH("Trying to use stmatrix without CUTE_ARCH_STSM_SM90_ENABLED.");
#endif
}
};
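// These ops are rarely invoked directly; they are usually wrapped in a Copy_Atom whose
// Copy_Traits (see copy_traits_sm90.hpp) supply the thread/value-to-bit mapping. A sketch,
// assuming 16-bit accumulator data and a compatible swizzled smem layout:
//
//   // auto tiled_stsm = make_tiled_copy_C(Copy_Atom<SM90_U32x4_STSM_N, half_t>{}, tiled_mma);
//   // auto thr_stsm   = tiled_stsm.get_thread_slice(tid);
//   // copy(tiled_stsm, thr_stsm.retile_S(acc_frag), thr_stsm.partition_D(sC));  // regs -> smem
//
// The .trans variants (SM90_U16x*_STSM_T) store each 8x8 matrix transposed, which helps when the
// register fragment's majorness differs from the desired smem layout.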
//
// Legacy STSM interfaces that aren't very useful
//
template <class T>
CUTE_HOST_DEVICE
void
copy_stsm(T const* const rmem_ptr,
uint128_t* const smem_ptr)
{
uint32_t const* reg_ptr = reinterpret_cast<uint32_t const*>(rmem_ptr);
// if constexpr
if (sizeof(T) == 4) {
SM90_U32x1_STSM_N::copy(reg_ptr[0], smem_ptr[0]);
}
else if (sizeof(T) == 8) {
SM90_U32x2_STSM_N::copy(reg_ptr[0], reg_ptr[1], smem_ptr[0]);
}
else if (sizeof(T) == 16) {
SM90_U32x4_STSM_N::copy(reg_ptr[0], reg_ptr[1], reg_ptr[2], reg_ptr[3], smem_ptr[0]);
}
else {
static_assert(sizeof(T) == 4 || sizeof(T) == 8 || sizeof(T) == 16, "sizeof(T) is not supported");
}
}
template <class T>
CUTE_HOST_DEVICE
void
copy_stsm_trans(T const* const rmem_ptr,
uint128_t* const smem_ptr)
{
uint32_t const* reg_ptr = reinterpret_cast<uint32_t const*>(rmem_ptr);
// if constexpr
if (sizeof(T) == 4) {
SM90_U16x2_STSM_T::copy(reg_ptr[0], smem_ptr[0]);
}
else if (sizeof(T) == 8) {
SM90_U16x4_STSM_T::copy(reg_ptr[0], reg_ptr[1], smem_ptr[0]);
}
else if (sizeof(T) == 16) {
SM90_U16x8_STSM_T::copy(reg_ptr[0], reg_ptr[1], reg_ptr[2], reg_ptr[3], smem_ptr[0]);
}
else {
static_assert(sizeof(T) == 4 || sizeof(T) == 8 || sizeof(T) == 16, "sizeof(T) is not supported");
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // end namespace cute
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <cute/arch/copy_sm90_desc.hpp>
#include <cute/arch/copy_sm90_tma.hpp>
////////////////////////////////////////////////////////////////////////////////////////////////////
| include/cute/arch/copy_sm90.hpp/0 | {
"file_path": "include/cute/arch/copy_sm90.hpp",
"repo_id": "include",
"token_count": 3157
} | 10 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cute/arch/copy_sm80.hpp>
#include <cute/atom/copy_traits.hpp>
#include <cute/layout.hpp>
namespace cute
{
template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEALWAYS<S,D>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// Construct a zfill variant with a given predicate value
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM80_CP_ASYNC_CACHEALWAYS_ZFILL<S,D>>
with(bool pred) const {
return {pred};
}
};
template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEGLOBAL<S,D>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// Construct a zfill variant with a given predicate value
CUTE_HOST_DEVICE constexpr
Copy_Traits<SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<S,D>>
with(bool pred) const {
return {pred};
}
};
template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEALWAYS_ZFILL<S,D>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// Predicate value that determines whether to load or zfill
bool pred = false;
// Overload copy_unpack for zfill variant to pass the predicate into the op
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_gmem<TS>::value, "Expected gmem source for cp.async.");
static_assert(is_smem<TD>::value, "Expected smem destination for cp.async.");
Tensor rS = recast<S>(src);
Tensor rD = recast<D>(dst);
CUTE_STATIC_ASSERT_V(size(rS) == Int<1>{},
"In CopyAtom, src layout doesn't vectorize into registers. This src layout is incompatible with this tiled copy.");
CUTE_STATIC_ASSERT_V(size(rD) == Int<1>{},
"In CopyAtom, dst layout doesn't vectorize into registers. This dst layout is incompatible with this tiled copy.");
SM80_CP_ASYNC_CACHEALWAYS_ZFILL<S,D>::copy(rS[0], rD[0], traits.pred);
}
};
template <class S, class D>
struct Copy_Traits<SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<S,D>>
{
// Logical thread id to thread idx (one-thread)
using ThrID = Layout<_1>;
// Map from (src-thr,src-val) to bit
using SrcLayout = Layout<Shape<_1,Int<sizeof_bits<S>::value>>>;
// Map from (dst-thr,dst-val) to bit
using DstLayout = Layout<Shape<_1,Int<sizeof_bits<D>::value>>>;
// Reference map from (thr,val) to bit
using RefLayout = SrcLayout;
// Predicate value that determines whether to load or zfill
bool pred = false;
// Overload copy_unpack for zfill variant to pass the predicate into the op
template <class TS, class SLayout,
class TD, class DLayout>
CUTE_HOST_DEVICE friend constexpr
void
copy_unpack(Copy_Traits const& traits,
Tensor<TS,SLayout> const& src,
Tensor<TD,DLayout> & dst)
{
static_assert(is_gmem<TS>::value, "Expected gmem source for cp.async.");
static_assert(is_smem<TD>::value, "Expected smem destination for cp.async.");
Tensor rS = recast<S>(src);
Tensor rD = recast<D>(dst);
CUTE_STATIC_ASSERT_V(size(rS) == Int<1>{},
"In CopyAtom, src layout doesn't vectorize into registers. This src layout is incompatible with this tiled copy.");
CUTE_STATIC_ASSERT_V(size(rD) == Int<1>{},
"In CopyAtom, dst layout doesn't vectorize into registers. This dst layout is incompatible with this tiled copy.");
SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<S,D>::copy(rS[0], rD[0], traits.pred);
}
};
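// The ZFILL variants are normally reached through the plain atom's with() method rather than
// spelled out directly. A sketch (the predicate name is an assumption):
//
//   // auto atom = Copy_Atom<SM80_CP_ASYNC_CACHEGLOBAL<uint128_t, uint128_t>, half_t>{};
//   // copy(atom.with(k_in_bounds), tAgA, tAsA);
//
// When the predicate is false, cp.async writes zeros into smem instead of loading from gmem,
// keeping the smem tile well-defined for the MMA without a separate clear.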
////////////////////////////////////////////////////////////////////////////////////////////////////
// Element copy selector
template <class SrcTensor, class DstTensor>
CUTE_HOST_DEVICE constexpr
auto
select_elementwise_copy(SrcTensor const&, DstTensor const&)
{
using SrcType = typename SrcTensor::value_type;
using DstType = typename DstTensor::value_type;
#if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
if constexpr (is_gmem<SrcTensor>::value && is_smem<DstTensor>::value &&
sizeof(SrcType) == sizeof(DstType) &&
(sizeof(SrcType) == 4 || sizeof(SrcType) == 8 || sizeof(SrcType) == 16))
{
return SM80_CP_ASYNC_CACHEALWAYS<SrcType,DstType>{};
} else {
return UniversalCopy<SrcType,DstType>{};
}
CUTE_GCC_UNREACHABLE;
#else
return UniversalCopy<SrcType,DstType>{};
#endif
}
} // namespace cute
| include/cute/atom/copy_traits_sm80.hpp/0 | {
"file_path": "include/cute/atom/copy_traits_sm80.hpp",
"repo_id": "include",
"token_count": 2594
} | 11 |
/***************************************************************************************************
* Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <vector_types.h>
#include <cutlass/numeric_types.h>
#include <cutlass/numeric_size.h>
#include <cute/numeric/int.hpp>
#include <cute/numeric/real.hpp>
namespace cute {
template <typename T>
struct sizeof_bits : public cutlass::sizeof_bits<T> {};
// DO NOT change auto to int: sizeof_bits<sparse_elem> uses integral_ratio instead of int
template <class T>
static constexpr auto sizeof_bits_v = sizeof_bits<T>::value;
using cutlass::bits_to_bytes;
using cutlass::is_subbyte;
template <class T>
static constexpr auto is_subbyte_v = is_subbyte<T>::value;
using cutlass::half_t;
using cutlass::bfloat16_t;
using cutlass::tfloat32_t;
// Umbrella floating-point 8-bit data type : type_erased_dynamic_float8_t
// This umbrella datatype can be enabled when a user provides a specific
// datatype in the runtime argument list.
using cutlass::type_erased_dynamic_float8_t;
using cutlass::float_e4m3_t;
using cutlass::float_e5m2_t;
using cutlass::uint1b_t;
using cutlass::int2b_t;
using cutlass::uint2b_t;
using cutlass::int4b_t;
using cutlass::uint4b_t;
using cutlass::bin1_t;
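// Illustrative sanity checks of the widths these aliases carry (a documentation aid; all hold
// by construction):
static_assert(sizeof_bits_v<half_t> == 16, "half_t is 16 bits");
static_assert(sizeof_bits_v<float_e4m3_t> == 8, "float_e4m3_t is 8 bits");
static_assert(sizeof_bits_v<int4b_t> == 4 && is_subbyte_v<int4b_t>, "int4b_t is a sub-byte type");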
} // end namespace cute
| include/cute/numeric/numeric_types.hpp/0 | {
"file_path": "include/cute/numeric/numeric_types.hpp",
"repo_id": "include",
"token_count": 877
} | 12 |